##// END OF EJS Templates
perf: add a new `perfmergecopies` command...
marmoute -
r42576:f5f0a949 default
parent child Browse files
Show More
@@ -1,2926 +1,2944 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
12 12 worst, median average. If not set only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
16 16 number of second to wait before any group of runs (default: 1)
17 17
18 18 ``pre-run``
19 19 number of run to perform before starting measurement.
20 20
21 21 ``profile-benchmark``
22 22 Enable profiling for the benchmarked section.
23 23 (The first iteration is benchmarked)
24 24
25 25 ``run-limits``
26 26 Control the number of runs each benchmark will perform. The option value
27 27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 28 conditions are considered in order with the following logic:
29 29
30 30 If benchmark has been running for <time> seconds, and we have performed
31 31 <numberofrun> iterations, stop the benchmark,
32 32
33 33 The default value is: `3.0-100, 10.0-3`
34 34
35 35 ``stub``
36 36 When set, benchmarks will only be run once, useful for testing
37 37 (default: off)
38 38 '''
39 39
40 40 # "historical portability" policy of perf.py:
41 41 #
42 42 # We have to do:
43 43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 46 # - make historical perf command work correctly with as wide Mercurial
47 47 # version as possible
48 48 #
49 49 # We have to do, if possible with reasonable cost:
50 50 # - make recent perf command for historical feature work correctly
51 51 # with early Mercurial
52 52 #
53 53 # We don't have to do:
54 54 # - make perf command for recent feature work correctly with early
55 55 # Mercurial
56 56
57 57 from __future__ import absolute_import
58 58 import contextlib
59 59 import functools
60 60 import gc
61 61 import os
62 62 import random
63 63 import shutil
64 64 import struct
65 65 import sys
66 66 import tempfile
67 67 import threading
68 68 import time
69 69 from mercurial import (
70 70 changegroup,
71 71 cmdutil,
72 72 commands,
73 73 copies,
74 74 error,
75 75 extensions,
76 76 hg,
77 77 mdiff,
78 78 merge,
79 79 revlog,
80 80 util,
81 81 )
82 82
83 83 # for "historical portability":
84 84 # try to import modules separately (in dict order), and ignore
85 85 # failure, because these aren't available with early Mercurial
86 86 try:
87 87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 88 except ImportError:
89 89 pass
90 90 try:
91 91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 92 except ImportError:
93 93 pass
94 94 try:
95 95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 96 dir(registrar) # forcibly load it
97 97 except ImportError:
98 98 registrar = None
99 99 try:
100 100 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 101 except ImportError:
102 102 pass
103 103 try:
104 104 from mercurial.utils import repoviewutil # since 5.0
105 105 except ImportError:
106 106 repoviewutil = None
107 107 try:
108 108 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 109 except ImportError:
110 110 pass
111 111 try:
112 112 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 113 except ImportError:
114 114 pass
115 115
116 116 try:
117 117 from mercurial import profiling
118 118 except ImportError:
119 119 profiling = None
120 120
def identity(a):
    """Return *a* unchanged.

    Fallback used when ``pycompat`` helpers (byteskwargs, fsencode, ...)
    are unavailable in very old Mercurial versions.
    """
    return a
123 123
try:
    from mercurial import pycompat
    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (ImportError, AttributeError):
    # Mercurial too old for pycompat: python 2-only fallbacks
    # (xrange/maxint do not exist on py3, but such a Mercurial is py2-only)
    import inspect
    getargspec = inspect.getargspec
    _byteskwargs = identity
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange
143 143
# for "historical portability": locate a Queue class across versions
try:
    # 4.7+
    queue = pycompat.queue.Queue
except (AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (AttributeError, ImportError):
        queue = util.queue
153 153
# for "historical portability": the log templater factory moved modules
try:
    from mercurial import logcmdutil
    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None
162 162
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # unique sentinel: distinguishes "missing" from None
def safehasattr(thing, attr):
    """Return True if *thing* has attribute *attr* (given as bytes)."""
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)
170 170
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    # NOTE(review): os.name is a str, so this bytes comparison can never
    # match on py3 -- harmless because py3 always has perf_counter above
    util.timer = time.clock
else:
    util.timer = time.time
180 180
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))
188 188
# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
    (b'c', b'changelog', False, (b'open changelog')),
    (b'm', b'manifest', False, (b'open manifest')),
    (b'', b'dir', False, (b'open directory manifest')),
    ]))
200 200
cmdtable = {}  # command table populated by the @command decorator below

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a command declaration b'name|alias1|...' into its names."""
    return cmd.split(b"|")
208 208
# Build the @command decorator from the newest API available.
if safehasattr(registrar, 'command'):
    # modern location, since 3.7
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
236 236
# Declare the perf.* config knobs when the registrar supports it;
# silently skip on Mercurial versions without configitems (pre-4.3).
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'pre-run',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'profile-benchmark',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'run-limits',
               default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    pass
265 265
def getlen(ui):
    """Return a length function for benchmark results.

    When the experimental ``perf.stub`` config is set (test mode), return
    a constant-1 function so results do not depend on actual sizes.
    """
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len
270 270
class noop(object):
    """dummy context manager"""
    def __enter__(self):
        pass
    def __exit__(self, *args):
        pass

# stateless shared instance, safe to reuse across `with` blocks
NOOPCTX = noop()
279 279
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry is "<seconds>-<runcount>"; malformed entries are warned
    # about and skipped rather than aborting the benchmark
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n'
                     % item))
            continue
        try:
            time_limit = float(pycompat.sysstr(parts[0]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (pycompat.bytestr(e), item)))
            continue
        try:
            run_limit = int(pycompat.sysstr(parts[1]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (pycompat.bytestr(e), item)))
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
                          prerun=prerun, profiler=profiler)
    return t, fm
379 379
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* exactly once, without timing it.

    Substituted for ``_timer`` when the ``perf.stub`` config is set so
    tests exercise benchmark code paths quickly; *fm* and *title* are
    accepted only for signature compatibility with ``_timer``.
    """
    if setup is not None:
        setup()
    func()
384 384
@contextlib.contextmanager
def timeone():
    """Context manager measuring one run.

    Yields a list; on exit, appends a single tuple
    ``(wall-clock, user-cpu, system-cpu)`` of seconds elapsed inside
    the ``with`` block.
    """
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    # os.times(): index 0 is user time, index 1 is system time
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
395 395
396 396
# list of stop condition (elapsed time, minimal run count)
# checked in order: stop after 100 runs once 3s have passed, or after
# 3 runs once 10s have passed
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
402 402
def _timer(fm, func, setup=None, title=None, displayall=False,
           limits=DEFAULTLIMITS, prerun=0, profiler=None):
    """Benchmark *func* and report results through formatter *fm*.

    ``setup`` runs before every measured call; ``prerun`` extra calls are
    executed unmeasured to warm caches; ``limits`` is a sequence of
    (elapsed-seconds, min-run-count) stop conditions. When *profiler* is
    given, only the first measured iteration is profiled.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for i in range(prerun):
        # warm-up: executed but never recorded
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # profile the first iteration only
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)
435 435
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timings through formatter *fm*.

    *timings* is a list of (wall, user, sys) tuples; it is sorted in
    place. Always reports the best run; with *displayall* also reports
    max, average and median.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)
    def display(role, entry):
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')
    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
467 467
468 468 # utilities for historical portability
469 469
def getint(ui, section, name, default):
    """Read config ``section.name`` as an int, or *default* when unset.

    Raises error.ConfigError when the configured value is not an integer.
    """
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, v))
481 481
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    # capture the current value so restore() can undo a later set()
    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
511 511
512 512 # utilities to examine each internal API changes
513 513
def getbranchmapsubsettable():
    """Return the repoview ``subsettable`` wherever this version keeps it."""
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
530 530
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        # pre-2.3 name; raises AttributeError if absent too (intentional)
        return getattr(repo, 'sopener')
541 541
def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        # pre-2.3 name; raises AttributeError if absent too (intentional)
        return getattr(repo, 'opener')
552 552
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
581 581
582 582 # utilities to clear cache
583 583
def clearfilecache(obj, attrname):
    """Drop *attrname* from *obj*'s instance dict and its _filecache.

    Operates on the unfiltered repo when *obj* is a repoview, so the
    cached value is really recomputed on next access.
    """
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
591 591
def clearchangelog(repo):
    """Force the changelog to be reloaded on next access.

    For filtered repos, also resets the repoview's changelog cache key
    so the filtered view cannot serve a stale changelog.
    """
    if repo is not repo.unfiltered():
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
597 597
598 598 # perf commands
599 599
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the dirstate for files matching PATS"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()
608 608
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file F at the working parent revision"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()
616 616
@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    """benchmark computing the working directory status"""
    opts = _byteskwargs(opts)
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
628 628
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the whole working directory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # fix: capture oldquiet BEFORE the try block -- the original assigned
    # it inside, so an exception there made the finally raise NameError
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            # uipathfn parameter appeared in newer scmutil.addremove
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
646 646
def clearcaches(cl):
    """Clear a changelog's lookup caches across internal API versions."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # older revlog API: reset the node->rev cache by hand
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
655 655
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def s():
        # start each run from cold changelog caches
        clearcaches(cl)
    def d():
        len(cl.headrevs())
    timer(d, setup=s)
    fm.end()
668 668
@command(b'perftags', formatteropts +
         [
             (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perftags(ui, repo, **opts):
    """benchmark computing repository tags"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def s():
        # optionally drop revlog caches so tag computation starts cold
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()
    def t():
        return len(repo.tags())
    timer(t, setup=s)
    fm.end()
687 687
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating all ancestors of the changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        for a in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()
698 698
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests of REVSET revs in a lazy ancestor set"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            # membership test only; result is deliberately discarded
            rev in s
    timer(d)
    fm.end()
711 711
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # NOTE(review): unlike sibling commands this one does not call
    # _byteskwargs(opts) -- preserved as-is to avoid a behavior change
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # a fresh peer per run so connection state is not reused
        repos[1] = hg.peer(ui, opts, path)
    def d():
        setdiscovery.findcommonheads(ui, *repos)
    timer(d, setup=s)
    fm.end()
726 726
@command(b'perfbookmarks', formatteropts +
         [
             (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def s():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')
    def d():
        # property access triggers the parse being measured
        repo._bookmarks
    timer(d, setup=s)
    fm.end()
745 745
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        # benchmark applying *fn* to a freshly opened bundle
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # benchmark draining the bundle in *size*-byte reads
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # baseline: raw file reads without any bundle parsing
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # open once just to detect the bundle type and pick benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
863 863
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
894 894
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark computing the dirstate's directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate before timing
    b'a' in dirstate
    def d():
        dirstate.hasdir(b'a')
        # drop the cached dir map so each run rebuilds it
        del dirstate._map._dirs
    timer(d)
    fm.end()
906 906
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark loading the dirstate from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate before timing
    b"a" in repo.dirstate
    def d():
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(d)
    fm.end()
917 917
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark rebuilding the dirstate directory map via hasdir"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate before timing
    b"a" in repo.dirstate
    def d():
        repo.dirstate.hasdir(b"a")
        # drop the cached dir map so each run rebuilds it
        del repo.dirstate._map._dirs
    timer(d)
    fm.end()
928 928
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark rebuilding the dirstate file fold map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # make sure the dirstate itself is already loaded
    b'a' in dirstate
    def d():
        dirstate._map.filefoldmap.get(b'a')
        # drop the cached property so the next run recomputes it
        del dirstate._map.filefoldmap
    timer(d)
    fm.end()
940 940
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark rebuilding the dirstate directory fold map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # make sure the dirstate itself is already loaded
    b'a' in dirstate
    def d():
        dirstate._map.dirfoldmap.get(b'a')
        # drop both cached properties so the next run recomputes them
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(d)
    fm.end()
953 953
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark serializing the dirstate back to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # make sure the dirstate is loaded before timing
    b"a" in ds
    def d():
        # force the dirty flag so write() actually serializes every run
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()
965 965
def _getmergerevs(repo, opts):
    """parse command arguments to return the revisions involved in a merge

    input: options dictionary with `rev`, `from` and `base` keys; the
           dictionary has already been through `_byteskwargs`, so the keys
           are bytes
    output: (localctx, otherctx, basectx)
    """
    # The callers run opts through _byteskwargs(), which turns every key
    # into bytes on Python 3 — so lookups here must use bytes keys too
    # (str keys raised KeyError on Python 3).
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
987 987
@command(b'perfmergecalculate',
         [
          (b'r', b'rev', b'.', b'rev to merge against'),
          (b'', b'from', b'', b'rev to merge from'),
          (b'', b'base', b'', b'the revision to use as base'),
         ] + formatteropts)
def perfmergecalculate(ui, repo, **opts):
    """benchmark `merge.calculateupdates` between two revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # revisions are resolved once, outside the timed section
    wctx, rctx, ancestor = _getmergerevs(repo, opts)
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()
1006 1006
@command(b'perfmergecopies',
         [
          (b'r', b'rev', b'.', b'rev to merge against'),
          (b'', b'from', b'', b'rev to merge from'),
          (b'', b'base', b'', b'the revision to use as base'),
         ] + formatteropts)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # revisions are resolved once, outside the timed section
    wctx, rctx, ancestor = _getmergerevs(repo, opts)
    def d():
        # NOTE(review): comment about acceptremote was copy-pasted from
        # perfmergecalculate; mergecopies takes no such argument and asks
        # no questions, so only the call itself is timed here.
        copies.mergecopies(repo, wctx, rctx, ancestor)
    timer(d)
    fm.end()
1024
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # both contexts are resolved once, outside the timed section
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def d():
        copies.pathcopies(ctx1, ctx2)
    timer(d)
    fm.end()
1018 1036
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
         ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def d():
        phases = _phases
        if full:
            # with --full, also drop the filecache entry so the phase data
            # is re-read from disk on every run
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()
1037 1055
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    # deferred imports keep perf.py loadable on Mercurial versions that
    # predate these modules/names
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    # fetch the remote phase roots once, before timing
    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    # count how many advertised roots are known locally and non-public,
    # as context for interpreting the timing below
    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
1093 1111
@command(b'perfmanifest',[
            (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
            (b'', b'clear-disk', False, b'clear on-disk caches too'),
         ] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset; take its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full 40-char hex: treat as a manifest node directly
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # getstorage is the modern API; fall back for older hg
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
1129 1147
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changelog entry by node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve the revision once, outside the timed section
    n = scmutil.revsingle(repo, rev).node()
    def d():
        repo.changelog.read(n)
        #repo.changelog._cache = None
    timer(d)
    fm.end()
1140 1158
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # drop the cached matcher so each run re-parses .hgignore
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        # property access triggers the ignore matcher construction
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1157 1175
@command(b'perfindex', [
            (b'', b'rev', [], b'revision to be looked up (default tip)'),
            (b'', b'no-lookup', None, b'do not revision lookup post creation'),
         ] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # after _byteskwargs() every key is bytes; the previous str-key
        # lookup (opts['rev']) raised KeyError on Python 3.  The Abort
        # message is bytes like every other ui string in this file.
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)
    timer(d, setup=setup)
    fm.end()
1211 1229
@command(b'perfnodemap', [
            (b'', b'rev', [], b'revision to be looked up (default tip)'),
            (b'', b'clear-caches', True, b'clear revlog cache between calls'),
         ] + formatteropts)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # after _byteskwargs() every key is bytes; the previous str-key lookup
    # (opts['clear_caches']) raised KeyError on Python 3
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        # Abort message is bytes, consistent with the rest of this file
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]
    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        def setup():
            setnodeget()
    else:
        setnodeget()
        d() # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1270 1288
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup cost of a `hg version -q` subprocess"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        if os.name != r'nt':
            # empty HGRCPATH keeps user config from skewing the timing
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
        else:
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
    timer(d)
    fm.end()
1284 1302
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    # resolve the node ids once, outside the timed section
    nl = [repo.changelog.node(i) for i in _xrange(count)]
    def d():
        for n in nl:
            repo.changelog.parents(n)
    timer(d)
    fm.end()
1308 1326
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of one changeset"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo[x].files()))
    fm.end()
1318 1336
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading a changeset's file list straight from the changelog"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    # hoisted so the attribute lookup is not part of the timed call
    changelog = repo.changelog
    timer(lambda: len(changelog.read(x)[3]))
    fm.end()
1329 1347
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision spec to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def run():
        return len(repo.lookup(rev))
    timer(run)
    fm.end()
1336 1354
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
         ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark applying a pre-generated random edit stream to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: every invocation benchmarks the exact same edit sequence
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    # build the whole edit list up front so only replacelines() is timed
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1370 1388
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revision specs"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # hoisted so the module attribute lookup is not part of the timed call
    resolve = scmutil.revrange
    timer(lambda: len(resolve(repo, specs)))
    fm.end()
1378 1396
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark node -> rev lookup on a freshly constructed revlog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    # open the changelog revlog directly, bypassing repo-level caches
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(n)
        # drop revlog caches so every run performs a cold lookup
        clearcaches(cl)
    timer(d)
    fm.end()
1392 1410
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark a full `hg log` run (output buffered and discarded)"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev=[]
    timer, fm = gettimer(ui, opts)
    # buffer output so terminal IO does not pollute the timing
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
                               copies=opts.get(b'rename')))
    ui.popbuffer()
    fm.end()
1406 1424
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        # walk from tip down to revision 0
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            ctx.branch() # read changelog data (in addition to the index)
    timer(moonwalk)
    fm.end()
1421 1439
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render to /dev/null so terminal/pager IO does not pollute the timing
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1455 1473
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
         ])
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    # NOTE(review): the revset specs below are native str, and `data` mixes
    # bytes keys with the str keys added under --timing before being passed
    # as fm.data(**data) — looks Python-2 only as written; confirm against
    # the file's py3 conventions.
    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # restrict the search to merge commits within the requested revisions
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
1531 1549
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def build():
        return scmutil.casecollisionauditor(ui, False, repo.dirstate)
    timer(build)
    fm.end()
1538 1556
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    def d():
        s.fncache._load()
    timer(d)
    fm.end()
1548 1566
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache file inside a transaction"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    # NOTE(review): the lock and transaction are not released in a
    # try/finally, so an exception during timing leaves them held —
    # probably acceptable for a perf script, but worth confirming.
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')
    def d():
        # force the dirty flag so write() actually serializes every run
        s.fncache._dirty = True
        s.fncache.write(tr)
    timer(d)
    tr.close()
    lock.release()
    fm.end()
1565 1583
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark store-path encoding of every fncache entry"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    # load the fncache once, outside the timed section
    s.fncache._load()
    def d():
        for p in s.fncache.entries:
            s.encode(p)
    timer(d)
    fm.end()
1577 1595
def _bdiffworker(q, blocks, xdiff, ready, done):
    # Worker loop for the threaded mode of perfbdiff: pull text pairs off
    # the queue and diff them until a None sentinel arrives, then park on
    # the `ready` condition until woken for the next timing run (or until
    # `done` is set, which ends the thread).
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()
1593 1611
def _manifestrevision(repo, mnode):
    """Return the raw manifest text for node ``mnode``.

    Handles both the modern manifestlog API (``getstorage``) and the
    legacy one (``_revlog``).
    """
    manifestlog = repo.manifestlog
    if util.safehasattr(manifestlog, b'getstorage'):
        return manifestlog.getstorage(b'').revision(mnode)
    return manifestlog._revlog.revision(mnode)
1603 1621
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # -c/-m mode: the positional FILE argument is actually the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # collect all (old, new) text pairs up front so only diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        # threaded mode: workers consume pairs from a queue; a None sentinel
        # per thread marks the end of a run (see _bdiffworker)
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # tell the workers to exit and wake any thread parked on `ready`
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1704 1722
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # -c/-m mode: the positional FILE argument is actually the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # collect all (old, new) text pairs up front so only diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1770 1788
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # short flag -> commands.diff keyword argument
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = dict((options[c], b'1') for c in diffopt)
        # d closes over the loop's `opts`/`ui`; safe because timer(d) is
        # invoked within the same iteration
        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()
        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()
1792 1810
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    # raw index bytes; re-parsed from scratch by each benchmark closure below
    data = opener.read(indexfile)

    # the first 4 bytes of a revlog hold flags (high 16 bits) and the
    # format version (low 16 bits)
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    # sample nodes at fixed fractions of the revlog for lookup benchmarks
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    # each bench gets a fresh timer/formatter so results are reported per-title
    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1910 1928
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        # drop cached chunks so every run measures cold reads
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            # walk from tip down to startrev with a negative stride
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in _xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1952 1970
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          # fix: help text was a copy-paste of --stoprev's description
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
         ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # fix: message previously read "invalide run count"
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    # transpose per-run [(rev, time), ...] lists into [(rev, [times...]), ...]
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # fix: the median row was computed with `* 70 // 100`
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
        )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
2060 2078
2061 2079 class _faketr(object):
2062 2080 def add(s, x, y, z=None):
2063 2081 return None
2064 2082
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    """Time re-adding revisions [startrev, stoprev] of `orig` to a fresh copy.

    Returns a list of (rev, timing) tuples, one per re-added revision. The
    write happens against a truncated temporary copy of the revlog (see
    `_temprevlog`) so the original repository is never modified.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            def updateprogress(pos):
                progress.update(pos)
            def completeprogress():
                progress.complete()
        else:
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            # build args outside the timed section: only the write is measured
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
2101 2119
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) for addrawrevision of `rev`, fed per `source`.

    `source` selects what data seeds the write: a full text, a delta against
    one of the parents, the smallest parent delta, or the delta already
    stored in the revlog. Exactly one of `text`/`cachedelta` is populated.
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to p1 when there is no second parent
        parent = p2
        if p2 == nullid:
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # compare both parent deltas and keep the shorter one
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        # reuse the delta base already chosen by the existing revlog
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2140 2158
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Context manager yielding a temporary copy of `orig` truncated at
    `truncaterev`, so revisions >= truncaterev can be re-added for timing.

    The copy lives in a throwaway tempdir that is removed on exit. Inline
    revlogs are rejected because index and data share one file, which the
    truncation logic below does not handle.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # each index entry has fixed size _io.size, so this drops
            # every entry from truncaterev onward
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
2187 2205
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # no explicit selection: probe every available engine that actually
        # implements a revlog compressor
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # open the file that actually stores chunk data (index file when
        # the revlog is inline, data file otherwise)
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    # compression benches rely on chunks[0] populated by dochunkbatch above
    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2305 2323
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the positional FILE slot actually holds the REV
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # slice each segment's bytes into one raw chunk per revision,
        # without copying (util.buffer gives a zero-copy view)
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved between modules across Mercurial versions
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute inputs for each phase so benchmarks time only that phase
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2441 2459
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building
    the volatile revision set caches on the revset execution. Volatile
    caches hold filtered and obsolescence related data."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def d():
        if clear:
            repo.invalidatevolatilesets()
        # fully consume the (lazy) result so evaluation is actually timed
        if contexts:
            for ctx in repo.set(expr): pass
        else:
            for r in repo.revs(expr): pass
    timer(d)
    fm.end()
2464 2482
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def getobs(name):
        # benchmark closure for one obsolescence set
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)
        return d

    # when positional names are given, restrict to the requested sets
    allobs = sorted(obsolete.cachefuncs)
    if names:
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        # benchmark closure for one repoview filter computation
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)
        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()
2506 2524
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
          ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        # the per-filter cache mapping moved under `_per_filter` in newer
        # Mercurial; fall back to the old flat mapping otherwise
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                view._branchcaches.clear()
            else:
                # drop only this filter's entry so the subset stays warm
                filtered.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset is not itself pending, so filters are
        # ordered from smaller subsets to bigger ones
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap reads/writes so only computation is timed;
    # restored in the finally block below
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2585 2603
2586 2604 @command(b'perfbranchmapupdate', [
2587 2605 (b'', b'base', [], b'subset of revision to start from'),
2588 2606 (b'', b'target', [], b'subset of revision to end with'),
2589 2607 (b'', b'clear-caches', False, b'clear cache between each runs')
2590 2608 ] + formatteropts)
2591 2609 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2592 2610 """benchmark branchmap update from for <base> revs to <target> revs
2593 2611
2594 2612 If `--clear-caches` is passed, the following items will be reset before
2595 2613 each update:
2596 2614 * the changelog instance and associated indexes
2597 2615 * the rev-branch-cache instance
2598 2616
2599 2617 Examples:
2600 2618
2601 2619 # update for the one last revision
2602 2620 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2603 2621
2604 2622 $ update for change coming with a new branch
2605 2623 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2606 2624 """
2607 2625 from mercurial import branchmap
2608 2626 from mercurial import repoview
2609 2627 opts = _byteskwargs(opts)
2610 2628 timer, fm = gettimer(ui, opts)
2611 2629 clearcaches = opts[b'clear_caches']
2612 2630 unfi = repo.unfiltered()
2613 2631 x = [None] # used to pass data between closure
2614 2632
2615 2633 # we use a `list` here to avoid possible side effect from smartset
2616 2634 baserevs = list(scmutil.revrange(repo, base))
2617 2635 targetrevs = list(scmutil.revrange(repo, target))
2618 2636 if not baserevs:
2619 2637 raise error.Abort(b'no revisions selected for --base')
2620 2638 if not targetrevs:
2621 2639 raise error.Abort(b'no revisions selected for --target')
2622 2640
2623 2641 # make sure the target branchmap also contains the one in the base
2624 2642 targetrevs = list(set(baserevs) | set(targetrevs))
2625 2643 targetrevs.sort()
2626 2644
2627 2645 cl = repo.changelog
2628 2646 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2629 2647 allbaserevs.sort()
2630 2648 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2631 2649
2632 2650 newrevs = list(alltargetrevs.difference(allbaserevs))
2633 2651 newrevs.sort()
2634 2652
2635 2653 allrevs = frozenset(unfi.changelog.revs())
2636 2654 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2637 2655 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2638 2656
2639 2657 def basefilter(repo, visibilityexceptions=None):
2640 2658 return basefilterrevs
2641 2659
2642 2660 def targetfilter(repo, visibilityexceptions=None):
2643 2661 return targetfilterrevs
2644 2662
2645 2663 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2646 2664 ui.status(msg % (len(allbaserevs), len(newrevs)))
2647 2665 if targetfilterrevs:
2648 2666 msg = b'(%d revisions still filtered)\n'
2649 2667 ui.status(msg % len(targetfilterrevs))
2650 2668
2651 2669 try:
2652 2670 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2653 2671 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2654 2672
2655 2673 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2656 2674 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2657 2675
2658 2676 # try to find an existing branchmap to reuse
2659 2677 subsettable = getbranchmapsubsettable()
2660 2678 candidatefilter = subsettable.get(None)
2661 2679 while candidatefilter is not None:
2662 2680 candidatebm = repo.filtered(candidatefilter).branchmap()
2663 2681 if candidatebm.validfor(baserepo):
2664 2682 filtered = repoview.filterrevs(repo, candidatefilter)
2665 2683 missing = [r for r in allbaserevs if r in filtered]
2666 2684 base = candidatebm.copy()
2667 2685 base.update(baserepo, missing)
2668 2686 break
2669 2687 candidatefilter = subsettable.get(candidatefilter)
2670 2688 else:
2671 2689 # no suitable subset was found
2672 2690 base = branchmap.branchcache()
2673 2691 base.update(baserepo, allbaserevs)
2674 2692
2675 2693 def setup():
2676 2694 x[0] = base.copy()
2677 2695 if clearcaches:
2678 2696 unfi._revbranchcache = None
2679 2697 clearchangelog(repo)
2680 2698
2681 2699 def bench():
2682 2700 x[0].update(targetrepo, newrevs)
2683 2701
2684 2702 timer(bench, setup=setup)
2685 2703 fm.end()
2686 2704 finally:
2687 2705 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2688 2706 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2689 2707
@command(b'perfbranchmapload', [
     (b'f', b'filter', b'', b'Specify repoview filter'),
     # fix: help string previously read "brachmap"
     (b'', b'list', False, b'List branchmap filter caches'),
     (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),

    ] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap

    With --list, only enumerate the on-disk branchmap cache files and
    their sizes; no benchmark is run.  With --clear-revlogs the changelog
    is refreshed before each timed run, so revlog setup cost is included.
    """
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # on-disk caches are named 'branch2-<filtername>' ('branch2' alone
        # for the unfiltered repo)
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap() # make sure we have a relevant, up to date branchmap

    # branchmap.read was renamed branchcache.fromfile at some point
    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached; walk up the
    # subset chain until we find a filter level with an on-disk cache
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        fromfile(repo)
    timer(bench, setup=setup)
    fm.end()
2740 2758
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)
    def loadmarkers():
        # constructing the obsstore parses every on-disk marker
        return len(obsolete.obsstore(svfs))
    timer(loadmarkers)
    fm.end()
2750 2768
@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
    (b'', b'mincost', 0, b'smallest cost of items in cache'),
    (b'', b'maxcost', 100, b'maximum cost of items in cache'),
    (b'', b'size', 4, b'size of cache'),
    (b'', b'gets', 10000, b'number of key lookups'),
    (b'', b'sets', 10000, b'number of key sets'),
    (b'', b'mixed', 10000, b'number of mixed mode operations'),
    (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    # Benchmark util.lrucachedict under several access patterns: creation,
    # pure lookups, pure insertions, and a randomized mix of gets and sets.
    # When --costlimit is non-zero, the cost-aware insert() API is
    # exercised instead of plain __setitem__/__getitem__.
    opts = _byteskwargs(opts)

    def doinit():
        # measures the cost of constructing an empty cache
        for i in _xrange(10000):
            util.lrucachedict(size)

    # inclusive range of per-item costs to draw from
    costrange = list(range(mincost, maxcost + 1))

    # `size` random keys; the cache is exactly full, so gets never evict
    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        # cost-aware variant of dogets(); note `costs` is captured by
        # closure and only assigned further down, before any bench runs
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                # entries may have been evicted by the cost limit
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0  # get
        else:
            op = 1  # set

        # keys span twice the cache size so some gets miss and sets evict
        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        # only the cost-aware variants are meaningful with a cost limit
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    # each pattern is timed and reported independently
    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2881 2899
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def dowrites():
        # keep the attribute lookup inside the loop: it is part of what a
        # real caller of ui.write pays per call
        for _ in range(100000):
            ui.write(b'Testing write performance\n')
    timer(dowrites)
    fm.end()
2894 2912
def uisetup(ui):
    # extension setup hook: install compatibility wrappers when this
    # extension is loaded by an old Mercurial
    if (util.safehasattr(cmdutil, b'openrevlog') and
        not util.safehasattr(commands, b'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            # abort early with a hint when --dir is passed but the repo
            # object has no dirlog support
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(b"This version doesn't support --dir option",
                                  hint=b"use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2909 2927
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def dobench():
        # drive one increment per step through the progress helper
        with ui.makeprogress(topic, total=total) as progress:
            for _ in pycompat.xrange(total):
                progress.increment()

    timer(dobench)
    fm.end()
@@ -1,391 +1,393 b''
1 1 #require test-repo
2 2
3 3 Set vars:
4 4
5 5 $ . "$TESTDIR/helpers-testrepo.sh"
6 6 $ CONTRIBDIR="$TESTDIR/../contrib"
7 7
8 8 Prepare repo:
9 9
10 10 $ hg init
11 11
12 12 $ echo this is file a > a
13 13 $ hg add a
14 14 $ hg commit -m first
15 15
16 16 $ echo adding to file a >> a
17 17 $ hg commit -m second
18 18
19 19 $ echo adding more to file a >> a
20 20 $ hg commit -m third
21 21
22 22 $ hg up -r 0
23 23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 24 $ echo merge-this >> a
25 25 $ hg commit -m merge-able
26 26 created new head
27 27
28 28 $ hg up -r 2
29 29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30 30
31 31 perfstatus
32 32
33 33 $ cat >> $HGRCPATH << EOF
34 34 > [extensions]
35 35 > perf=$CONTRIBDIR/perf.py
36 36 > [perf]
37 37 > presleep=0
38 38 > stub=on
39 39 > parentscount=1
40 40 > EOF
41 41 $ hg help -e perf
42 42 perf extension - helper extension to measure performance
43 43
44 44 Configurations
45 45 ==============
46 46
47 47 "perf"
48 48 ------
49 49
50 50 "all-timing"
51 51 When set, additional statistics will be reported for each benchmark: best,
52 52 worst, median average. If not set only the best timing is reported
53 53 (default: off).
54 54
55 55 "presleep"
56 56 number of second to wait before any group of runs (default: 1)
57 57
58 58 "pre-run"
59 59 number of run to perform before starting measurement.
60 60
61 61 "profile-benchmark"
62 62 Enable profiling for the benchmarked section. (The first iteration is
63 63 benchmarked)
64 64
65 65 "run-limits"
66 66 Control the number of runs each benchmark will perform. The option value
67 67 should be a list of '<time>-<numberofrun>' pairs. After each run the
68 68 conditions are considered in order with the following logic:
69 69
70 70 If benchmark has been running for <time> seconds, and we have performed
71 71 <numberofrun> iterations, stop the benchmark,
72 72
73 73 The default value is: '3.0-100, 10.0-3'
74 74
75 75 "stub"
76 76 When set, benchmarks will only be run once, useful for testing (default:
77 77 off)
78 78
79 79 list of commands:
80 80
81 81 perfaddremove
82 82 (no help text available)
83 83 perfancestors
84 84 (no help text available)
85 85 perfancestorset
86 86 (no help text available)
87 87 perfannotate (no help text available)
88 88 perfbdiff benchmark a bdiff between revisions
89 89 perfbookmarks
90 90 benchmark parsing bookmarks from disk to memory
91 91 perfbranchmap
92 92 benchmark the update of a branchmap
93 93 perfbranchmapload
94 94 benchmark reading the branchmap
95 95 perfbranchmapupdate
96 96 benchmark branchmap update from for <base> revs to <target>
97 97 revs
98 98 perfbundleread
99 99 Benchmark reading of bundle files.
100 100 perfcca (no help text available)
101 101 perfchangegroupchangelog
102 102 Benchmark producing a changelog group for a changegroup.
103 103 perfchangeset
104 104 (no help text available)
105 105 perfctxfiles (no help text available)
106 106 perfdiffwd Profile diff of working directory changes
107 107 perfdirfoldmap
108 108 (no help text available)
109 109 perfdirs (no help text available)
110 110 perfdirstate (no help text available)
111 111 perfdirstatedirs
112 112 (no help text available)
113 113 perfdirstatefoldmap
114 114 (no help text available)
115 115 perfdirstatewrite
116 116 (no help text available)
117 117 perfdiscovery
118 118 benchmark discovery between local repo and the peer at given
119 119 path
120 120 perffncacheencode
121 121 (no help text available)
122 122 perffncacheload
123 123 (no help text available)
124 124 perffncachewrite
125 125 (no help text available)
126 126 perfheads benchmark the computation of a changelog heads
127 127 perfhelper-pathcopies
128 128 find statistic about potential parameters for the
129 129 'perftracecopies'
130 130 perfignore benchmark operation related to computing ignore
131 131 perfindex benchmark index creation time followed by a lookup
132 132 perflinelogedits
133 133 (no help text available)
134 134 perfloadmarkers
135 135 benchmark the time to parse the on-disk markers for a repo
136 136 perflog (no help text available)
137 137 perflookup (no help text available)
138 138 perflrucachedict
139 139 (no help text available)
140 140 perfmanifest benchmark the time to read a manifest from disk and return a
141 141 usable
142 142 perfmergecalculate
143 143 (no help text available)
144 perfmergecopies
145 measure runtime of 'copies.mergecopies'
144 146 perfmoonwalk benchmark walking the changelog backwards
145 147 perfnodelookup
146 148 (no help text available)
147 149 perfnodemap benchmark the time necessary to look up revision from a cold
148 150 nodemap
149 151 perfparents benchmark the time necessary to fetch one changeset's parents.
150 152 perfpathcopies
151 153 benchmark the copy tracing logic
152 154 perfphases benchmark phasesets computation
153 155 perfphasesremote
154 156 benchmark time needed to analyse phases of the remote server
155 157 perfprogress printing of progress bars
156 158 perfrawfiles (no help text available)
157 159 perfrevlogchunks
158 160 Benchmark operations on revlog chunks.
159 161 perfrevlogindex
160 162 Benchmark operations against a revlog index.
161 163 perfrevlogrevision
162 164 Benchmark obtaining a revlog revision.
163 165 perfrevlogrevisions
164 166 Benchmark reading a series of revisions from a revlog.
165 167 perfrevlogwrite
166 168 Benchmark writing a series of revisions to a revlog.
167 169 perfrevrange (no help text available)
168 170 perfrevset benchmark the execution time of a revset
169 171 perfstartup (no help text available)
170 172 perfstatus (no help text available)
171 173 perftags (no help text available)
172 174 perftemplating
173 175 test the rendering time of a given template
174 176 perfunidiff benchmark a unified diff between revisions
175 177 perfvolatilesets
176 178 benchmark the computation of various volatile set
177 179 perfwalk (no help text available)
178 180 perfwrite microbenchmark ui.write
179 181
180 182 (use 'hg help -v perf' to show built-in aliases and global options)
181 183 $ hg perfaddremove
182 184 $ hg perfancestors
183 185 $ hg perfancestorset 2
184 186 $ hg perfannotate a
185 187 $ hg perfbdiff -c 1
186 188 $ hg perfbdiff --alldata 1
187 189 $ hg perfunidiff -c 1
188 190 $ hg perfunidiff --alldata 1
189 191 $ hg perfbookmarks
190 192 $ hg perfbranchmap
191 193 $ hg perfbranchmapload
192 194 $ hg perfbranchmapupdate --base "not tip" --target "tip"
193 195 benchmark of branchmap with 3 revisions with 1 new ones
194 196 $ hg perfcca
195 197 $ hg perfchangegroupchangelog
196 198 $ hg perfchangegroupchangelog --cgversion 01
197 199 $ hg perfchangeset 2
198 200 $ hg perfctxfiles 2
199 201 $ hg perfdiffwd
200 202 $ hg perfdirfoldmap
201 203 $ hg perfdirs
202 204 $ hg perfdirstate
203 205 $ hg perfdirstatedirs
204 206 $ hg perfdirstatefoldmap
205 207 $ hg perfdirstatewrite
206 208 #if repofncache
207 209 $ hg perffncacheencode
208 210 $ hg perffncacheload
209 211 $ hg debugrebuildfncache
210 212 fncache already up to date
211 213 $ hg perffncachewrite
212 214 $ hg debugrebuildfncache
213 215 fncache already up to date
214 216 #endif
215 217 $ hg perfheads
216 218 $ hg perfignore
217 219 $ hg perfindex
218 220 $ hg perflinelogedits -n 1
219 221 $ hg perfloadmarkers
220 222 $ hg perflog
221 223 $ hg perflookup 2
222 224 $ hg perflrucache
223 225 $ hg perfmanifest 2
224 226 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
225 227 $ hg perfmanifest -m 44fe2c8352bb
226 228 abort: manifest revision must be integer or full node
227 229 [255]
228 230 $ hg perfmergecalculate -r 3
229 231 $ hg perfmoonwalk
230 232 $ hg perfnodelookup 2
231 233 $ hg perfpathcopies 1 2
232 234 $ hg perfprogress --total 1000
233 235 $ hg perfrawfiles 2
234 236 $ hg perfrevlogindex -c
235 237 #if reporevlogstore
236 238 $ hg perfrevlogrevisions .hg/store/data/a.i
237 239 #endif
238 240 $ hg perfrevlogrevision -m 0
239 241 $ hg perfrevlogchunks -c
240 242 $ hg perfrevrange
241 243 $ hg perfrevset 'all()'
242 244 $ hg perfstartup
243 245 $ hg perfstatus
244 246 $ hg perftags
245 247 $ hg perftemplating
246 248 $ hg perfvolatilesets
247 249 $ hg perfwalk
248 250 $ hg perfparents
249 251 $ hg perfdiscovery -q .
250 252
251 253 Test run control
252 254 ----------------
253 255
254 256 Simple single entry
255 257
256 258 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
257 259 ! wall * comb * user * sys * (best of 15) (glob)
258 260
259 261 Multiple entries
260 262
261 263 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
262 264 ! wall * comb * user * sys * (best of 5) (glob)
263 265
264 266 error case are ignored
265 267
266 268 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
267 269 malformatted run limit entry, missing "-": 500
268 270 ! wall * comb * user * sys * (best of 5) (glob)
269 271 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
270 272 malformatted run limit entry, could not convert string to float: aaa: aaa-12 (no-py3 !)
271 273 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
272 274 ! wall * comb * user * sys * (best of 5) (glob)
273 275 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
274 276 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
275 277 ! wall * comb * user * sys * (best of 5) (glob)
276 278
277 279 test actual output
278 280 ------------------
279 281
280 282 normal output:
281 283
282 284 $ hg perfheads --config perf.stub=no
283 285 ! wall * comb * user * sys * (best of *) (glob)
284 286
285 287 detailed output:
286 288
287 289 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
288 290 ! wall * comb * user * sys * (best of *) (glob)
289 291 ! wall * comb * user * sys * (max of *) (glob)
290 292 ! wall * comb * user * sys * (avg of *) (glob)
291 293 ! wall * comb * user * sys * (median of *) (glob)
292 294
293 295 test json output
294 296 ----------------
295 297
296 298 normal output:
297 299
298 300 $ hg perfheads --template json --config perf.stub=no
299 301 [
300 302 {
301 303 "comb": *, (glob)
302 304 "count": *, (glob)
303 305 "sys": *, (glob)
304 306 "user": *, (glob)
305 307 "wall": * (glob)
306 308 }
307 309 ]
308 310
309 311 detailed output:
310 312
311 313 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
312 314 [
313 315 {
314 316 "avg.comb": *, (glob)
315 317 "avg.count": *, (glob)
316 318 "avg.sys": *, (glob)
317 319 "avg.user": *, (glob)
318 320 "avg.wall": *, (glob)
319 321 "comb": *, (glob)
320 322 "count": *, (glob)
321 323 "max.comb": *, (glob)
322 324 "max.count": *, (glob)
323 325 "max.sys": *, (glob)
324 326 "max.user": *, (glob)
325 327 "max.wall": *, (glob)
326 328 "median.comb": *, (glob)
327 329 "median.count": *, (glob)
328 330 "median.sys": *, (glob)
329 331 "median.user": *, (glob)
330 332 "median.wall": *, (glob)
331 333 "sys": *, (glob)
332 334 "user": *, (glob)
333 335 "wall": * (glob)
334 336 }
335 337 ]
336 338
337 339 Test pre-run feature
338 340 --------------------
339 341
340 342 (perf discovery has some spurious output)
341 343
342 344 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
343 345 ! wall * comb * user * sys * (best of 1) (glob)
344 346 searching for changes
345 347 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
346 348 ! wall * comb * user * sys * (best of 1) (glob)
347 349 searching for changes
348 350 searching for changes
349 351 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
350 352 ! wall * comb * user * sys * (best of 1) (glob)
351 353 searching for changes
352 354 searching for changes
353 355 searching for changes
354 356 searching for changes
355 357
356 358 test profile-benchmark option
357 359 ------------------------------
358 360
359 361 Function to check that statprof ran
360 362 $ statprofran () {
361 363 > egrep 'Sample count:|No samples recorded' > /dev/null
362 364 > }
363 365 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
364 366
365 367 Check perf.py for historical portability
366 368 ----------------------------------------
367 369
368 370 $ cd "$TESTDIR/.."
369 371
370 372 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
371 373 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
372 374 > "$TESTDIR"/check-perf-code.py contrib/perf.py
373 375 contrib/perf.py:\d+: (re)
374 376 > from mercurial import (
375 377 import newer module separately in try clause for early Mercurial
376 378 contrib/perf.py:\d+: (re)
377 379 > from mercurial import (
378 380 import newer module separately in try clause for early Mercurial
379 381 contrib/perf.py:\d+: (re)
380 382 > origindexpath = orig.opener.join(orig.indexfile)
381 383 use getvfs()/getsvfs() for early Mercurial
382 384 contrib/perf.py:\d+: (re)
383 385 > origdatapath = orig.opener.join(orig.datafile)
384 386 use getvfs()/getsvfs() for early Mercurial
385 387 contrib/perf.py:\d+: (re)
386 388 > vfs = vfsmod.vfs(tmpdir)
387 389 use getvfs()/getsvfs() for early Mercurial
388 390 contrib/perf.py:\d+: (re)
389 391 > vfs.options = getattr(orig.opener, 'options', None)
390 392 use getvfs()/getsvfs() for early Mercurial
391 393 [1]
General Comments 0
You need to be logged in to leave comments. Login now