##// END OF EJS Templates
perf: document config options...
marmoute -
r42182:dbca2e55 default
parent child Browse files
Show More
@@ -1,2781 +1,2799
1 1 # perf.py - performance test routines
2 '''helper extension to measure performance'''
2 '''helper extension to measure performance
3
4 Configurations
5 ==============
6
7 ``perf``
8 --------
9
10 ``all-timing``
11 When set, additional statistic will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
13 (default: off).
14
15 ``presleep``
16 number of second to wait before any group of run (default: 1)
17
18 ``stub``
19 When set, benchmark will only be run once, useful for testing (default: off)
20 '''
3 21
4 22 # "historical portability" policy of perf.py:
5 23 #
6 24 # We have to do:
7 25 # - make perf.py "loadable" with as wide Mercurial version as possible
8 26 # This doesn't mean that perf commands work correctly with that Mercurial.
9 27 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
10 28 # - make historical perf command work correctly with as wide Mercurial
11 29 # version as possible
12 30 #
13 31 # We have to do, if possible with reasonable cost:
14 32 # - make recent perf command for historical feature work correctly
15 33 # with early Mercurial
16 34 #
17 35 # We don't have to do:
18 36 # - make perf command for recent feature work correctly with early
19 37 # Mercurial
20 38
21 39 from __future__ import absolute_import
22 40 import contextlib
23 41 import functools
24 42 import gc
25 43 import os
26 44 import random
27 45 import shutil
28 46 import struct
29 47 import sys
30 48 import tempfile
31 49 import threading
32 50 import time
33 51 from mercurial import (
34 52 changegroup,
35 53 cmdutil,
36 54 commands,
37 55 copies,
38 56 error,
39 57 extensions,
40 58 hg,
41 59 mdiff,
42 60 merge,
43 61 revlog,
44 62 util,
45 63 )
46 64
47 65 # for "historical portability":
48 66 # try to import modules separately (in dict order), and ignore
49 67 # failure, because these aren't available with early Mercurial
50 68 try:
51 69 from mercurial import branchmap # since 2.5 (or bcee63733aad)
52 70 except ImportError:
53 71 pass
54 72 try:
55 73 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
56 74 except ImportError:
57 75 pass
58 76 try:
59 77 from mercurial import registrar # since 3.7 (or 37d50250b696)
60 78 dir(registrar) # forcibly load it
61 79 except ImportError:
62 80 registrar = None
63 81 try:
64 82 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
65 83 except ImportError:
66 84 pass
67 85 try:
68 86 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
69 87 except ImportError:
70 88 pass
71 89 try:
72 90 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
73 91 except ImportError:
74 92 pass
75 93
76 94
def identity(a):
    # no-op passthrough; used as a fallback when a pycompat helper is missing
    return a
79 97
80 98 try:
81 99 from mercurial import pycompat
82 100 getargspec = pycompat.getargspec # added to module after 4.5
83 101 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
84 102 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
85 103 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
86 104 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
87 105 if pycompat.ispy3:
88 106 _maxint = sys.maxsize # per py3 docs for replacing maxint
89 107 else:
90 108 _maxint = sys.maxint
91 109 except (ImportError, AttributeError):
92 110 import inspect
93 111 getargspec = inspect.getargspec
94 112 _byteskwargs = identity
95 113 fsencode = identity # no py3 support
96 114 _maxint = sys.maxint # no py3 support
97 115 _sysstr = lambda x: x # no py3 support
98 116 _xrange = xrange
99 117
100 118 try:
101 119 # 4.7+
102 120 queue = pycompat.queue.Queue
103 121 except (AttributeError, ImportError):
104 122 # <4.7.
105 123 try:
106 124 queue = pycompat.queue
107 125 except (AttributeError, ImportError):
108 126 queue = util.queue
109 127
110 128 try:
111 129 from mercurial import logcmdutil
112 130 makelogtemplater = logcmdutil.maketemplater
113 131 except (AttributeError, ImportError):
114 132 try:
115 133 makelogtemplater = cmdutil.makelogtemplater
116 134 except (AttributeError, ImportError):
117 135 makelogtemplater = None
118 136
119 137 # for "historical portability":
120 138 # define util.safehasattr forcibly, because util.safehasattr has been
121 139 # available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # unique sentinel: distinguishes "missing" from attrs set to None
def safehasattr(thing, attr):
    # getattr with a sentinel default never raises, unlike probing with getattr alone;
    # attr may be bytes, _sysstr converts it for py3's getattr
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)  # force our version onto util for old hg
126 144
127 145 # for "historical portability":
128 146 # define util.timer forcibly, because util.timer has been available
129 147 # since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    # py3.3+: monotonic, highest-resolution clock available
    util.timer = time.perf_counter
elif os.name == b'nt':
    # NOTE(review): os.name is a native str, so this bytes comparison is
    # always False on py3 — harmless there since perf_counter exists, but
    # verify the intent was py2-Windows only
    util.timer = time.clock
else:
    util.timer = time.time
136 154
137 155 # for "historical portability":
138 156 # use locally defined empty option list, if formatteropts isn't
139 157 # available, because commands.formatteropts has been available since
140 158 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
141 159 # available since 2.2 (or ae5f92e154d3)
# prefer cmdutil's copy, then commands'; fall back to no formatter options at all
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
                         (b'c', b'changelog', False, (b'open changelog')),
                         (b'm', b'manifest', False, (b'open manifest')),
                         (b'', b'dir', False, (b'open directory manifest')),
                     ]))

# command table filled in by the @command decorator defined below
cmdtable = {}
158 176
159 177 # for "historical portability":
160 178 # define parsealiases locally, because cmdutil.parsealiases has been
161 179 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Return the list of names in a b"name|alias1|alias2" command spec."""
    separator = b"|"
    return cmd.split(separator)
164 182
# pick whichever @command decorator implementation this Mercurial provides,
# newest API first
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            # emulate norepo by appending to the global norepo name list
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
    else:
        # for "historical portability":
        # define "@command" annotation locally, because cmdutil.command
        # has been available since 1.9 (or 2daa5179e73f)
        def command(name, options=(), synopsis=None, norepo=False):
            def decorator(func):
                if synopsis:
                    cmdtable[name] = func, list(options), synopsis
                else:
                    cmdtable[name] = func, list(options)
                if norepo:
                    commands.norepo += b' %s' % b' '.join(parsealiases(name))
                return func
            return decorator
192 210
# register the config knobs this extension reads; dynamicdefault because the
# effective defaults are supplied at the call sites.  Wrapped in try/except
# since registrar.configitem doesn't exist in older Mercurial versions —
# those fall through to the ad-hoc ui.config* reads used below.
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
               )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
               )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
               )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
               )
except (ImportError, AttributeError):
    pass
212 230
def getlen(ui):
    """Return the length function to use, honoring perf.stub.

    In stub mode every collection is reported as length 1 so benchmark
    loops stay trivially cheap during testing.
    """
    if ui.configbool(b"perf", b"stub", False):
        return lambda seq: 1
    return len
217 235
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        # copy first so the redirect doesn't leak into the caller's ui
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                # falsy, like plainformatter: callers use truthiness to
                # detect a "real" (structured-output) formatter
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)
    return functools.partial(_timer, fm, displayall=displayall), fm
283 301
def stub_timer(fm, func, setup=None, title=None):
    """Single-shot stand-in for _timer, used when perf.stub is set.

    Runs the optional setup then the payload exactly once; fm and title
    are accepted only for signature compatibility and produce no output.
    """
    if setup is not None:
        setup()
    func()
288 306
@contextlib.contextmanager
def timeone():
    """Yield a one-item list that receives a single timing sample on exit.

    The appended tuple is (wallclock, user-cpu-delta, sys-cpu-delta).
    """
    sample = []
    times_before = os.times()
    wall_before = util.timer()
    yield sample
    wall_after = util.timer()
    times_after = os.times()
    sample.append((wall_after - wall_before,
                   times_after[0] - times_before[0],
                   times_after[1] - times_before[1]))
299 317
def _timer(fm, func, setup=None, title=None, displayall=False):
    """Repeatedly time func() and hand the collected samples to formatone().

    Keeps sampling until at least 100 runs have consumed more than 3 seconds
    of wall time, or at least 3 runs have consumed more than 10 seconds.
    """
    gc.collect()
    samples = []
    start = util.timer()
    runs = 0
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with timeone() as sample:
            lastresult = func()
        runs += 1
        samples.append(sample[0])
        elapsed = util.timer() - start
        if (elapsed > 3 and runs >= 100) or (elapsed > 10 and runs >= 3):
            keepgoing = False

    formatone(fm, samples, title=title, result=lastresult,
              displayall=displayall)
320 338
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Emit one benchmark entry through formatter *fm*.

    *timings* is a list of (wall, user, sys) tuples.  Only the best sample
    is printed unless *displayall* is set, which adds max/avg/median rows.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def emit(role, sample):
        # the plain "best" row carries no prefix on its field names
        fieldprefix = b'' if role == b'best' else b'%s.' % role
        fm.plain(b'!')
        fm.write(fieldprefix + b'wall', b' wall %f', sample[0])
        fm.write(fieldprefix + b'comb', b' comb %f', sample[1] + sample[2])
        fm.write(fieldprefix + b'user', b' user %f', sample[1])
        fm.write(fieldprefix + b'sys', b' sys %f', sample[2])
        fm.write(fieldprefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    emit(b'best', timings[0])
    if displayall:
        emit(b'max', timings[-1])
        emit(b'avg', tuple([sum(x) / count for x in zip(*timings)]))
        emit(b'median', timings[len(timings) // 2])
352 370
353 371 # utilities for historical portability
354 372
def getint(ui, section, name, default):
    """Read an integer config value, tolerating pre-1.9 ui APIs.

    Returns *default* when the key is unset and raises ConfigError when the
    stored value is not an integer.
    """
    # ui.configint has been available since 1.9 (or fa2b596db182)
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, raw))
366 384
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    # capture the current value now so restore() can put it back later;
    # name may be bytes, hence the _sysstr conversion for py3 getattr/setattr
    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
396 414
397 415 # utilities to examine each internal API changes
398 416
def getbranchmapsubsettable():
    """Locate the branchmap 'subsettable' across historical module layouts.

    The table lives in:
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    """
    for candidate in (branchmap, repoview):
        table = getattr(candidate, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
414 432
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # repo.svfs has been available since 2.3 (or 7034365089bf);
    # earlier versions exposed the same thing as repo.sopener
    svfs = getattr(repo, 'svfs', None)
    return svfs if svfs else getattr(repo, 'sopener')
425 443
def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # repo.vfs has been available since 2.3 (or 7034365089bf);
    # earlier versions exposed the same thing as repo.opener
    vfs = getattr(repo, 'vfs', None)
    return vfs if vfs else getattr(repo, 'opener')
436 454
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            # NB: vars() keys are native strs on py3, so the lookup must use
            # a native (r'') string, not bytes, or the cache would never be
            # cleared (same convention as clearchangelog's r'_clcache' keys)
            if r'_tagscache' in vars(repo):
                del repo.__dict__[r'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
465 483
466 484 # utilities to clear cache
467 485
def clearfilecache(obj, attrname):
    """Drop a @filecache'd property so the next access recomputes it."""
    # localrepo keeps its file caches on the unfiltered repo; redirect there
    # when the object supports it
    target = obj if getattr(obj, 'unfiltered', None) is None else obj.unfiltered()
    if attrname in vars(target):
        delattr(target, attrname)
    target._filecache.pop(attrname, None)
475 493
def clearchangelog(repo):
    """Invalidate the (possibly filtered) repo's cached changelog."""
    unfiltered = repo.unfiltered()
    if repo is not unfiltered:
        # a filtered repo keeps its own cached changelog and cache key;
        # reset both on the filtered view itself
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(unfiltered, 'changelog')
481 499
482 500 # perf commands
483 501
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the dirstate for the given file patterns"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})
    def walkonce():
        return len(list(repo.dirstate.walk(matcher, subrepos=[],
                                           unknown=True, ignored=False)))
    timer(walkonce)
    fm.end()
492 510
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file *f* at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    filectx = repo[b'.'][f]
    timer(lambda: len(filectx.annotate(True)))
    fm.end()
500 518
@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    """benchmark computing the repository status"""
    opts = _byteskwargs(opts)
    # (an older variant of this benchmark measured dirstate.status with an
    # always-matcher instead of the repo-level status call below)
    timer, fm = gettimer(ui, opts)
    lookunknown = opts[b'unknown']
    timer(lambda: sum(map(len, repo.status(unknown=lookunknown))))
    fm.end()
512 530
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove across the working directory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True  # suppress per-file output while timing
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            # newer addremove() signatures take an explicit path-display
            # callback as a positional argument
            uipathfn = scmutil.getuipathfn(repo)
            run = lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts)
        else:
            run = lambda: scmutil.addremove(repo, matcher, b"", opts)
        timer(run)
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
530 548
def clearcaches(cl):
    """Drop a changelog/revlog's lookup caches across internal API changes."""
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # older revlogs have no clearcaches(): reset the node->rev map by hand
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
539 557
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def reset():
        # start every run from a cold cache
        clearcaches(cl)
    def run():
        len(cl.headrevs())
    timer(run, setup=reset)
    fm.end()
552 570
@command(b'perftags', formatteropts+
         [
             (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perftags(ui, repo, **opts):
    """benchmark reading and computing the repository's tags"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def reset():
        if clearrevlogs:
            # also drop the cached revlogs feeding the tags computation
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()
    timer(lambda: len(repo.tags()), setup=reset)
    fm.end()
571 589
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating over all ancestors of the current heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def drain():
        for _rev in repo.changelog.ancestors(heads):
            pass
    timer(drain)
    fm.end()
582 600
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark ancestor-membership tests of REVSET against all heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def probe():
        ancestors = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in ancestors  # the membership test is what we measure
    timer(probe)
    fm.end()
595 613
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # convert **kwargs keys to bytes, as every sibling command does; opts is
    # also forwarded to hg.peer(), which expects bytes keys
    opts = _byteskwargs(opts)
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # recreate the peer for each run so no discovery state is reused
        repos[1] = hg.peer(ui, opts, path)
    def d():
        setdiscovery.findcommonheads(ui, *repos)
    timer(d, setup=s)
    fm.end()
610 628
@command(b'perfbookmarks', formatteropts +
         [
             (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def reset():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')
    def load():
        repo._bookmarks  # propertycache access forces the on-disk parse
    timer(load, setup=reset)
    fm.end()
629 647
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    # imported lazily so this file stays loadable under old Mercurial
    # versions that lack these modules
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        # time: open + unwrap the bundle, then run fn over it
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # time: drain the decoded bundle stream `size` bytes at a time
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # baseline: raw file reads with no bundle decoding at all
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        # walk the changegroup deltas without applying them
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        # linear scan over the whole bundle2 payload
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        # seekable parts buffer their payload; this measures that extra cost
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        # time: per-part reads of `size` bytes across the whole bundle
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # sniff the bundle format once so only applicable benchmarks register
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
747 765
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # generate and drain the chunk stream; only the generation cost matters
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
778 796
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark dirstate.hasdir with a cold directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # force the dirstate to be loaded before timing
    def rebuild():
        dirstate.hasdir(b'a')
        del dirstate._map._dirs  # drop the map so each run recomputes it
    timer(rebuild)
    fm.end()
790 808
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark parsing the dirstate from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # prime once so OS-level caches are warm
    def reload():
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(reload)
    fm.end()
801 819
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark computing the dirstate's directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # load the dirstate before timing
    def rebuild():
        repo.dirstate.hasdir(b"a")
        del repo.dirstate._map._dirs  # force recomputation on the next run
    timer(rebuild)
    fm.end()
812 830
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark building the case-folding map for dirstate files"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # load the dirstate before timing
    def rebuild():
        dirstate._map.filefoldmap.get(b'a')
        del dirstate._map.filefoldmap  # recompute on the next run
    timer(rebuild)
    fm.end()
824 842
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark building the case-folding map for dirstate directories"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # load the dirstate before timing
    def rebuild():
        dirstate._map.dirfoldmap.get(b'a')
        # drop both the fold map and the dir map it derives from
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(rebuild)
    fm.end()
837 855
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark writing the dirstate back to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b"a" in dirstate  # load the dirstate before timing
    def flush():
        dirstate._dirty = True  # mark dirty so write() actually serializes
        dirstate.write(repo.currenttransaction())
    timer(flush)
    fm.end()
849 867
@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    """benchmark the merge-action calculation against REV"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def run():
        # acceptremote avoids interactive prompts mid-benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(run)
    fm.end()
868 886
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    timer(lambda: copies.pathcopies(ctx1, ctx2))
    fm.end()
880 898
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
          ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def recompute():
        phases = _phases
        if full:
            # --full also measures re-reading the phase data from disk
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(recompute)
    fm.end()
899 917
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    # imported lazily so this file stays loadable under old Mercurial versions
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    # count how many of the remote's non-public phase roots exist locally
    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
955 973
956 974 @command(b'perfmanifest',[
957 975 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
958 976 (b'', b'clear-disk', False, b'clear on-disk caches too'),
959 977 ] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset; use its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hex manifest node id
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # attribute names must be native str: getattr() raises
                # TypeError for bytes attribute names on Python 3
                if util.safehasattr(repo.manifestlog, 'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    # fallback for Mercurial versions without getstorage()
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')

    def d():
        # clear in-memory (and optionally on-disk) caches so every run
        # measures a cold read
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
991 1009
992 1010 @command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changeset entry from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def readchangeset():
        repo.changelog.read(node)
        #repo.changelog._cache = None

    timer(readchangeset)
    fm.end()
1002 1020
1003 1021 @command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def resetignore():
        # invalidate dirstate and drop the cached matcher so each run
        # rebuilds the ignore matcher from scratch
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def loadignore():
        dirstate._ignore

    timer(loadignore, setup=resetignore, title=b"load")
    fm.end()
1019 1037
1020 1038 @command(b'perfindex', [
1021 1039 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1022 1040 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1023 1041 ] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # after _byteskwargs() all option keys are bytes; the former
        # opts['rev'] str lookup raised KeyError on Python 3, and Abort
        # messages must be bytes as well
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)
    timer(d, setup=setup)
    fm.end()
1073 1091
1074 1092 @command(b'perfnodemap', [
1075 1093 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1076 1094 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1077 1095 ] + formatteropts)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # after _byteskwargs() all option keys are bytes; the former
    # opts['clear_caches'] str lookup raised KeyError on Python 3
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        # Abort messages must be bytes for Python 3 compatibility
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]
    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        def setup():
            setnodeget()
    else:
        setnodeget()
        d() # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1132 1150
1133 1151 @command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the time to spawn a bare `hg version -q` process"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def spawn():
        if os.name == r'nt':
            # NUL is the Windows equivalent of /dev/null
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
        else:
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))

    timer(spawn)
    fm.end()
1146 1164
1147 1165 @command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark looking up the parents of many changelog nodes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodes = [repo.changelog.node(i) for i in _xrange(count)]

    def lookupparents():
        for node in nodes:
            repo.changelog.parents(node)

    timer(lookupparents)
    fm.end()
1163 1181
1164 1182 @command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of a single changectx"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)

    def listfiles():
        len(repo[rev].files())

    timer(listfiles)
    fm.end()
1173 1191
1174 1192 @command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw file list straight from the changelog"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def readfiles():
        # index 3 of the parsed changelog entry is the list of touched files
        len(cl.read(rev)[3])

    timer(readfiles)
    fm.end()
1184 1202
1185 1203 @command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def lookup():
        len(repo.lookup(rev))

    timer(lookup)
    fm.end()
1191 1209
1192 1210 @command(b'perflinelogedits',
1193 1211 [(b'n', b'edits', 10000, b'number of edits'),
1194 1212 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1195 1213 ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark applying a deterministic series of random edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: the edit series is deterministic across runs
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        # pick a random hunk of the current text...
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        # ...and a random replacement range
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def applyedits():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(applyedits)
    fm.end()
1225 1243
1226 1244 @command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def resolve():
        len(scmutil.revrange(repo, specs))

    timer(resolve)
    fm.end()
1233 1251
1234 1252 @command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark looking a node up in a freshly constructed changelog revlog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    node = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")

    def lookup():
        cl.rev(node)
        # drop revlog caches so every run is a cold lookup
        clearcaches(cl)

    timer(lookup)
    fm.end()
1247 1265
1248 1266 @command(b'perflog',
1249 1267 [(b'', b'rename', False, b'ask log to follow renames')
1250 1268 ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark a plain `hg log` run (output is buffered away)"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)

    def runlog():
        commands.log(ui, repo, rev=rev, date=b'', user=b'',
                     copies=opts.get(b'rename'))

    ui.pushbuffer()
    timer(runlog)
    ui.popbuffer()
    fm.end()
1261 1279
1262 1280 @command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[rev]
            ctx.branch() # read changelog data (in addition to the index)

    timer(moonwalk)
    fm.end()
1276 1294
1277 1295 @command(b'perftemplating',
1278 1296 [(b'r', b'rev', [], b'revisions to run the template on'),
1279 1297 ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render through a ui whose output goes to the null device so terminal
    # writes do not dominate the measurement
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def render():
        for rev in revs:
            ctx = repo[rev]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(render)
    fm.end()
1310 1328
1311 1329 @command(b'perfhelper-pathcopies', formatteropts +
1312 1330 [
1313 1331 (b'r', b'revs', [], b'restrict search to these revisions'),
1314 1332 (b'', b'timing', False, b'provides extra data (costly)'),
1315 1333 ])
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    # NOTE(review): the revs=[] mutable default is never mutated here (only
    # rebound), but a None default would be the safer idiom.
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # restrict the search to merge commits: those are where copy tracing
    # between a common ancestor and each parent actually happens
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                # NOTE(review): this dict mixes bytes keys (below) with the
                # native-str keys added under `dotiming` and by the hexfunc
                # overrides; the two only coincide on Python 2 — confirm
                # whether this helper is expected to run on Python 3.
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
1386 1404
1387 1405 @command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def makeauditor():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(makeauditor)
    fm.end()
1393 1411
1394 1412 @command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    def load():
        store.fncache._load()

    timer(load)
    fm.end()
1403 1421
1404 1422 @command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache file to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        tr.addbackup(b'fncache')

        def d():
            # mark the cache dirty each run so write() really rewrites it
            s.fncache._dirty = True
            s.fncache.write(tr)

        timer(d)
        tr.close()
    finally:
        # release the lock even when the benchmark raises so a failed run
        # does not leave the repository locked
        lock.release()
    fm.end()
1420 1438
1421 1439 @command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently listed in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def encodeall():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encodeall)
    fm.end()
1432 1450
def _bdiffworker(q, blocks, xdiff, ready, done):
    """Worker loop for the threaded variant of perfbdiff.

    Pulls text pairs off queue ``q`` and diffs them until a ``None``
    sentinel is received, then parks on the ``ready`` condition so the
    same thread can be reused for the next timing round.  The loop exits
    once ``done`` is set (and the thread is woken a final time).
    """
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            # diff flavor mirrors the flags given to perfbdiff
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            # wait here until the driver starts another round (or shuts down)
            ready.wait()
1448 1466
def _manifestrevision(repo, mnode):
    """Return the raw stored text of manifest node ``mnode``."""
    ml = repo.manifestlog

    # attribute names must be native str: getattr() raises TypeError for
    # bytes attribute names on Python 3
    if util.safehasattr(ml, 'getstorage'):
        store = ml.getstorage(b'')
    else:
        # fallback for Mercurial versions without getstorage()
        store = ml._revlog

    return store.revision(mnode)
1458 1476
1459 1477 @command(b'perfbdiff', revlogopts + formatteropts + [
1460 1478 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1461 1479 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1462 1480 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
1463 1481 (b'', b'blocks', False, b'test computing diffs into blocks'),
1464 1482 (b'', b'xdiff', False, b'use xdiff algorithm'),
1465 1483 ],
1466 1484
1467 1485 b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with -c/-m the positional argument is the revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                # a None node on one side presumably maps to the null
                # revision's empty text via `or -1` — TODO confirm
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        q = queue()
        # one sentinel per worker so each thread reaches its ready.wait()
        # barrier before timing starts (see _bdiffworker)
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    # wake the workers one last time so they observe `done` and exit
    if withthreads:
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1559 1577
1560 1578 @command(b'perfunidiff', revlogopts + formatteropts + [
1561 1579 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1562 1580 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1563 1581 ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with -c/-m the positional argument is the revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                # a None node on one side presumably maps to the null
                # revision's empty text via `or -1` — TODO confirm
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1625 1643
1626 1644 @command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    # time `hg diff` once per whitespace-option combination
    for flags in ('', 'w', 'b', 'B', 'wB'):
        diffkwargs = dict((options[c], b'1') for c in flags)

        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **diffkwargs)
            ui.popbuffer()

        bflags = flags.encode('ascii')
        title = b'diffopts: %s' % (bflags and (b'-' + bflags) or b'none')
        timer(d, title=title)
    fm.end()
1647 1665
1648 1666 @command(b'perfrevlogindex', revlogopts + formatteropts,
1649 1667 b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener') # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # revlog v1 header: low 16 bits hold the version, bit 16 the inline flag
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    # sample nodes at fixed fractions of the index for the lookup benchmarks
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    # each (callable, title) pair below is timed independently
    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1765 1783
1766 1784 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1767 1785 [(b'd', b'dist', 100, b'distance between the revisions'),
1768 1786 (b's', b'startrev', 0, b'revision to start reading at'),
1769 1787 (b'', b'reverse', False, b'read in reverse')],
1770 1788 b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    if startrev < 0:
        # a negative start revision counts from the end of the revlog
        startrev = rllen + startrev

    def readseries():
        rl.clearcaches()

        first = startrev
        last = rllen
        step = opts[b'dist']

        if reverse:
            first, last = last - 1, first - 1
            step = -1 * step

        for x in _xrange(first, last, step):
            # Old revisions don't support passing int.
            node = rl.node(x)
            rl.revision(node)

    timer, fm = gettimer(ui, opts)
    timer(readseries)
    fm.end()
1807 1825
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          # BUG FIX: help text was a copy-paste of --stoprev's
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
         ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revision numbers count from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # BUG FIX: message previously read 'invalide run count'
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    # allresults holds one list of (rev, timing) per run; regroup the
    # timings per revision so per-revision statistics can be computed
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # BUG FIX: the 50% row was previously computed at the 70% mark
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
1915 1933
1916 1934 class _faketr(object):
1917 1935 def add(s, x, y, z=None):
1918 1936 return None
1919 1937
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    """Re-add revisions [startrev, stoprev] of `orig` to a temporary revlog,
    timing each individual `addrawrevision` call.

    Returns a list of (rev, timing) pairs, where `timing` is the result
    tuple produced by `timeone` for that single write.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            def updateprogress(pos):
                progress.update(pos)
            def completeprogress():
                progress.complete()
        else:
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # drop both index-level and revlog-level caches so every
                # write is measured from a cold state
                dest.index.clearcaches()
                dest.clearcaches()
            # time only the addrawrevision call itself, not the seed
            # preparation above
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
1956 1974
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) pair to feed `addrawrevision` for `rev`.

    `source` selects how the revision data is provided: as a full text
    (`full`), as a delta against a parent (`parent-1`, `parent-2`,
    `parent-smallest`), or as the delta already stored in `orig`
    (`storage`). Exactly one of `text`/`cachedelta` ends up non-None.
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second parent
        parent = p2
        if p2 == nullid:
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # pick whichever parent yields the shorter delta (p1 wins ties)
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        # reuse the delta base recorded in the original revlog
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1995 2013
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a temporary copy of revlog `orig`, truncated at `truncaterev`.

    The copy lives in a throwaway directory that is removed when the
    context exits. Inline revlogs are not supported (neither as source
    nor as result).
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # index entries are fixed size, so the cut point is
            # truncaterev entries from the start
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
2042 2060
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # no explicit selection: benchmark every engine that is available
        # and supports revlog compression
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # open a handle on the file that actually stores the chunk data
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    # mutable cell filled by dochunkbatch and consumed by docompress
    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2160 2178
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m there is no FILE argument: the first positional is
        # actually the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # carve the on-disk segments back into per-revision chunks
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        # older versions kept the helper on the revlog module itself
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute each phase's input so the benchmarks measure only
    # their own step
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2296 2314
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if need to evaluate the impact of build volatile
    revisions set cache on the revset execution. Volatile cache hold filtered
    and obsolete related cache."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    # iterate either as changectx objects or as plain revision numbers
    getiter = repo.set if contexts else repo.revs
    def runrevset():
        if clear:
            repo.invalidatevolatilesets()
        for unused in getiter(expr):
            pass
    timer(runrevset)
    fm.end()
2319 2337
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def mkbench(compute, target):
        # build a benchmark closure computing `target` through `compute`,
        # starting from invalidated volatile sets each time
        def bench():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            compute(repo, target)
        return bench

    obsnames = sorted(obsolete.cachefuncs)
    if names:
        obsnames = [n for n in obsnames if n in names]
    for name in obsnames:
        timer(mkbench(obsolete.getrevs, name), title=name)

    filternames = sorted(repoview.filtertable)
    if names:
        filternames = [n for n in filternames if n in names]
    for name in filternames:
        timer(mkbench(repoview.filterrevs, name), title=name)
    fm.end()
2361 2379
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
          ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                view._branchcaches.clear()
            else:
                # drop only this filter's cached branchmap
                filtered.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap reading and writing so only the in-memory
    # computation is measured; restored in the finally block below
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2440 2458
@command(b'perfbranchmapupdate', [
     (b'', b'base', [], b'subset of revision to start from'),
     (b'', b'target', [], b'subset of revision to end with'),
     (b'', b'clear-caches', False, b'clear cache between each runs')
    ] + formatteropts)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None] # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # register two ad-hoc repo filters exposing exactly the base and
        # target revision subsets; removed again in the finally block
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset where found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # start each run from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2544 2562
@command(b'perfbranchmapload', [
     (b'f', b'filter', b'', b'Specify repoview filter'),
     # BUG FIX: help text previously read 'brachmap'
     (b'', b'list', False, b'List branchmap filter caches'),
     (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),

    ] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # just enumerate the on-disk branchmap cache files and exit
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap() # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        # walk up the subset chain until a filter with a cached
        # branchmap is found
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        fromfile(repo)
    timer(bench, setup=setup)
    fm.end()
2595 2613
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)
    def countmarkers():
        return len(obsolete.obsstore(svfs))
    timer(countmarkers)
    fm.end()
2605 2623
@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
    (b'', b'mincost', 0, b'smallest cost of items in cache'),
    (b'', b'maxcost', 100, b'maximum cost of items in cache'),
    (b'', b'size', 4, b'size of cache'),
    (b'', b'gets', 10000, b'number of key lookups'),
    (b'', b'sets', 10000, b'number of key sets'),
    (b'', b'mixed', 10000, b'number of mixed mode operations'),
    (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    """Benchmark util.lrucachedict: construction, gets, inserts, and a
    randomized mix of both, with and without a total-cost limit.

    When ``--costlimit`` is non-zero, the cost-aware variants of each
    benchmark are run instead of the plain ones.
    """
    opts = _byteskwargs(opts)

    # Measure raw construction cost of the cache object itself.
    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    # Pool of possible per-item costs, sampled uniformly below.
    costrange = list(range(mincost, maxcost + 1))

    # Random key/value pool, exactly `size` entries so the cache is full
    # but never evicting during the "get" benchmarks.
    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    # NOTE: this closure reads `costs`, which is only assigned further
    # down (before any benchmark actually runs).  It also indexes `costs`
    # (length `sets`) by positions from `values` (length `size`), so it
    # presumably relies on size <= sets — TODO confirm.
    def dogetscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                # with a cost limit the item may have been evicted
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0  # get
        else:
            op = 1  # set

        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # Cost-limited and unlimited variants are mutually exclusive runs.
    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2736 2754
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def dowrite():
        # emit the same message many times to measure raw write throughput
        for _ in range(100000):
            ui.write((b'Testing write performance\n'))

    timer(dowrite)
    fm.end()
2749 2767
def uisetup(ui):
    # Detect Mercurial versions whose cmdutil.openrevlog() predates the
    # '--dir' option, and wrap it so that using '--dir' there aborts with
    # a clear message instead of failing obscurely.
    if (util.safehasattr(cmdutil, b'openrevlog') and
        not util.safehasattr(commands, b'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            # repo.dirlog only exists where treemanifest '--dir' support does
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(b"This version doesn't support --dir option",
                                  hint=b"use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2764 2782
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        # drive a progress bar through every value up to --total
        with ui.makeprogress(topic, total=total) as progress:
            for _ in pycompat.xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
@@ -1,302 +1,320
1 1 #require test-repo
2 2
3 3 Set vars:
4 4
5 5 $ . "$TESTDIR/helpers-testrepo.sh"
6 6 $ CONTRIBDIR="$TESTDIR/../contrib"
7 7
8 8 Prepare repo:
9 9
10 10 $ hg init
11 11
12 12 $ echo this is file a > a
13 13 $ hg add a
14 14 $ hg commit -m first
15 15
16 16 $ echo adding to file a >> a
17 17 $ hg commit -m second
18 18
19 19 $ echo adding more to file a >> a
20 20 $ hg commit -m third
21 21
22 22 $ hg up -r 0
23 23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 24 $ echo merge-this >> a
25 25 $ hg commit -m merge-able
26 26 created new head
27 27
28 28 $ hg up -r 2
29 29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30 30
31 31 perfstatus
32 32
33 33 $ cat >> $HGRCPATH << EOF
34 34 > [extensions]
35 35 > perf=$CONTRIBDIR/perf.py
36 36 > [perf]
37 37 > presleep=0
38 38 > stub=on
39 39 > parentscount=1
40 40 > EOF
41 41 $ hg help -e perf
42 42 perf extension - helper extension to measure performance
43 43
44 Configurations
45 ==============
46
47 "perf"
48 ------
49
50 "all-timing"
51 When set, additional statistic will be reported for each benchmark: best,
52 worst, median average. If not set only the best timing is reported
53 (default: off).
54
55 "presleep"
56 number of second to wait before any group of run (default: 1)
57
58 "stub"
59 When set, benchmark will only be run once, useful for testing (default:
60 off)
61
44 62 list of commands:
45 63
46 64 perfaddremove
47 65 (no help text available)
48 66 perfancestors
49 67 (no help text available)
50 68 perfancestorset
51 69 (no help text available)
52 70 perfannotate (no help text available)
53 71 perfbdiff benchmark a bdiff between revisions
54 72 perfbookmarks
55 73 benchmark parsing bookmarks from disk to memory
56 74 perfbranchmap
57 75 benchmark the update of a branchmap
58 76 perfbranchmapload
59 77 benchmark reading the branchmap
60 78 perfbranchmapupdate
61 79 benchmark branchmap update from for <base> revs to <target>
62 80 revs
63 81 perfbundleread
64 82 Benchmark reading of bundle files.
65 83 perfcca (no help text available)
66 84 perfchangegroupchangelog
67 85 Benchmark producing a changelog group for a changegroup.
68 86 perfchangeset
69 87 (no help text available)
70 88 perfctxfiles (no help text available)
71 89 perfdiffwd Profile diff of working directory changes
72 90 perfdirfoldmap
73 91 (no help text available)
74 92 perfdirs (no help text available)
75 93 perfdirstate (no help text available)
76 94 perfdirstatedirs
77 95 (no help text available)
78 96 perfdirstatefoldmap
79 97 (no help text available)
80 98 perfdirstatewrite
81 99 (no help text available)
82 100 perfdiscovery
83 101 benchmark discovery between local repo and the peer at given
84 102 path
85 103 perffncacheencode
86 104 (no help text available)
87 105 perffncacheload
88 106 (no help text available)
89 107 perffncachewrite
90 108 (no help text available)
91 109 perfheads benchmark the computation of a changelog heads
92 110 perfhelper-pathcopies
93 111 find statistic about potential parameters for the
94 112 'perftracecopies'
95 113 perfignore benchmark operation related to computing ignore
96 114 perfindex benchmark index creation time followed by a lookup
97 115 perflinelogedits
98 116 (no help text available)
99 117 perfloadmarkers
100 118 benchmark the time to parse the on-disk markers for a repo
101 119 perflog (no help text available)
102 120 perflookup (no help text available)
103 121 perflrucachedict
104 122 (no help text available)
105 123 perfmanifest benchmark the time to read a manifest from disk and return a
106 124 usable
107 125 perfmergecalculate
108 126 (no help text available)
109 127 perfmoonwalk benchmark walking the changelog backwards
110 128 perfnodelookup
111 129 (no help text available)
112 130 perfnodemap benchmark the time necessary to look up revision from a cold
113 131 nodemap
114 132 perfparents (no help text available)
115 133 perfpathcopies
116 134 benchmark the copy tracing logic
117 135 perfphases benchmark phasesets computation
118 136 perfphasesremote
119 137 benchmark time needed to analyse phases of the remote server
120 138 perfprogress printing of progress bars
121 139 perfrawfiles (no help text available)
122 140 perfrevlogchunks
123 141 Benchmark operations on revlog chunks.
124 142 perfrevlogindex
125 143 Benchmark operations against a revlog index.
126 144 perfrevlogrevision
127 145 Benchmark obtaining a revlog revision.
128 146 perfrevlogrevisions
129 147 Benchmark reading a series of revisions from a revlog.
130 148 perfrevlogwrite
131 149 Benchmark writing a series of revisions to a revlog.
132 150 perfrevrange (no help text available)
133 151 perfrevset benchmark the execution time of a revset
134 152 perfstartup (no help text available)
135 153 perfstatus (no help text available)
136 154 perftags (no help text available)
137 155 perftemplating
138 156 test the rendering time of a given template
139 157 perfunidiff benchmark a unified diff between revisions
140 158 perfvolatilesets
141 159 benchmark the computation of various volatile set
142 160 perfwalk (no help text available)
143 161 perfwrite microbenchmark ui.write
144 162
145 163 (use 'hg help -v perf' to show built-in aliases and global options)
146 164 $ hg perfaddremove
147 165 $ hg perfancestors
148 166 $ hg perfancestorset 2
149 167 $ hg perfannotate a
150 168 $ hg perfbdiff -c 1
151 169 $ hg perfbdiff --alldata 1
152 170 $ hg perfunidiff -c 1
153 171 $ hg perfunidiff --alldata 1
154 172 $ hg perfbookmarks
155 173 $ hg perfbranchmap
156 174 $ hg perfbranchmapload
157 175 $ hg perfbranchmapupdate --base "not tip" --target "tip"
158 176 benchmark of branchmap with 3 revisions with 1 new ones
159 177 $ hg perfcca
160 178 $ hg perfchangegroupchangelog
161 179 $ hg perfchangegroupchangelog --cgversion 01
162 180 $ hg perfchangeset 2
163 181 $ hg perfctxfiles 2
164 182 $ hg perfdiffwd
165 183 $ hg perfdirfoldmap
166 184 $ hg perfdirs
167 185 $ hg perfdirstate
168 186 $ hg perfdirstatedirs
169 187 $ hg perfdirstatefoldmap
170 188 $ hg perfdirstatewrite
171 189 #if repofncache
172 190 $ hg perffncacheencode
173 191 $ hg perffncacheload
174 192 $ hg debugrebuildfncache
175 193 fncache already up to date
176 194 $ hg perffncachewrite
177 195 $ hg debugrebuildfncache
178 196 fncache already up to date
179 197 #endif
180 198 $ hg perfheads
181 199 $ hg perfignore
182 200 $ hg perfindex
183 201 $ hg perflinelogedits -n 1
184 202 $ hg perfloadmarkers
185 203 $ hg perflog
186 204 $ hg perflookup 2
187 205 $ hg perflrucache
188 206 $ hg perfmanifest 2
189 207 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
190 208 $ hg perfmanifest -m 44fe2c8352bb
191 209 abort: manifest revision must be integer or full node
192 210 [255]
193 211 $ hg perfmergecalculate -r 3
194 212 $ hg perfmoonwalk
195 213 $ hg perfnodelookup 2
196 214 $ hg perfpathcopies 1 2
197 215 $ hg perfprogress --total 1000
198 216 $ hg perfrawfiles 2
199 217 $ hg perfrevlogindex -c
200 218 #if reporevlogstore
201 219 $ hg perfrevlogrevisions .hg/store/data/a.i
202 220 #endif
203 221 $ hg perfrevlogrevision -m 0
204 222 $ hg perfrevlogchunks -c
205 223 $ hg perfrevrange
206 224 $ hg perfrevset 'all()'
207 225 $ hg perfstartup
208 226 $ hg perfstatus
209 227 $ hg perftags
210 228 $ hg perftemplating
211 229 $ hg perfvolatilesets
212 230 $ hg perfwalk
213 231 $ hg perfparents
214 232 $ hg perfdiscovery -q .
215 233
216 234 test actual output
217 235 ------------------
218 236
219 237 normal output:
220 238
221 239 $ hg perfheads --config perf.stub=no
222 240 ! wall * comb * user * sys * (best of *) (glob)
223 241
224 242 detailed output:
225 243
226 244 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
227 245 ! wall * comb * user * sys * (best of *) (glob)
228 246 ! wall * comb * user * sys * (max of *) (glob)
229 247 ! wall * comb * user * sys * (avg of *) (glob)
230 248 ! wall * comb * user * sys * (median of *) (glob)
231 249
232 250 test json output
233 251 ----------------
234 252
235 253 normal output:
236 254
237 255 $ hg perfheads --template json --config perf.stub=no
238 256 [
239 257 {
240 258 "comb": *, (glob)
241 259 "count": *, (glob)
242 260 "sys": *, (glob)
243 261 "user": *, (glob)
244 262 "wall": * (glob)
245 263 }
246 264 ]
247 265
248 266 detailed output:
249 267
250 268 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
251 269 [
252 270 {
253 271 "avg.comb": *, (glob)
254 272 "avg.count": *, (glob)
255 273 "avg.sys": *, (glob)
256 274 "avg.user": *, (glob)
257 275 "avg.wall": *, (glob)
258 276 "comb": *, (glob)
259 277 "count": *, (glob)
260 278 "max.comb": *, (glob)
261 279 "max.count": *, (glob)
262 280 "max.sys": *, (glob)
263 281 "max.user": *, (glob)
264 282 "max.wall": *, (glob)
265 283 "median.comb": *, (glob)
266 284 "median.count": *, (glob)
267 285 "median.sys": *, (glob)
268 286 "median.user": *, (glob)
269 287 "median.wall": *, (glob)
270 288 "sys": *, (glob)
271 289 "user": *, (glob)
272 290 "wall": * (glob)
273 291 }
274 292 ]
275 293
276 294 Check perf.py for historical portability
277 295 ----------------------------------------
278 296
279 297 $ cd "$TESTDIR/.."
280 298
281 299 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
282 300 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
283 301 > "$TESTDIR"/check-perf-code.py contrib/perf.py
284 302 contrib/perf.py:\d+: (re)
285 303 > from mercurial import (
286 304 import newer module separately in try clause for early Mercurial
287 305 contrib/perf.py:\d+: (re)
288 306 > from mercurial import (
289 307 import newer module separately in try clause for early Mercurial
290 308 contrib/perf.py:\d+: (re)
291 309 > origindexpath = orig.opener.join(orig.indexfile)
292 310 use getvfs()/getsvfs() for early Mercurial
293 311 contrib/perf.py:\d+: (re)
294 312 > origdatapath = orig.opener.join(orig.datafile)
295 313 use getvfs()/getsvfs() for early Mercurial
296 314 contrib/perf.py:\d+: (re)
297 315 > vfs = vfsmod.vfs(tmpdir)
298 316 use getvfs()/getsvfs() for early Mercurial
299 317 contrib/perf.py:\d+: (re)
300 318 > vfs.options = getattr(orig.opener, 'options', None)
301 319 use getvfs()/getsvfs() for early Mercurial
302 320 [1]
General Comments 0
You need to be logged in to leave comments. Login now