##// END OF EJS Templates
perf: document perfparents
marmoute -
r42183:b900b392 default
parent child Browse files
Show More
@@ -1,2799 +1,2806 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
When set, additional statistics will be reported for each benchmark: best,
worst, median average. If not set, only the best timing is reported
(default: off).
14 14
15 15 ``presleep``
number of seconds to wait before any group of runs (default: 1)
17 17
18 18 ``stub``
19 19 When set, benchmark will only be run once, useful for testing (default: off)
20 20 '''
21 21
22 22 # "historical portability" policy of perf.py:
23 23 #
24 24 # We have to do:
25 25 # - make perf.py "loadable" with as wide Mercurial version as possible
26 26 # This doesn't mean that perf commands work correctly with that Mercurial.
27 27 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
28 28 # - make historical perf command work correctly with as wide Mercurial
29 29 # version as possible
30 30 #
31 31 # We have to do, if possible with reasonable cost:
32 32 # - make recent perf command for historical feature work correctly
33 33 # with early Mercurial
34 34 #
35 35 # We don't have to do:
36 36 # - make perf command for recent feature work correctly with early
37 37 # Mercurial
38 38
39 39 from __future__ import absolute_import
40 40 import contextlib
41 41 import functools
42 42 import gc
43 43 import os
44 44 import random
45 45 import shutil
46 46 import struct
47 47 import sys
48 48 import tempfile
49 49 import threading
50 50 import time
51 51 from mercurial import (
52 52 changegroup,
53 53 cmdutil,
54 54 commands,
55 55 copies,
56 56 error,
57 57 extensions,
58 58 hg,
59 59 mdiff,
60 60 merge,
61 61 revlog,
62 62 util,
63 63 )
64 64
65 65 # for "historical portability":
66 66 # try to import modules separately (in dict order), and ignore
67 67 # failure, because these aren't available with early Mercurial
68 68 try:
69 69 from mercurial import branchmap # since 2.5 (or bcee63733aad)
70 70 except ImportError:
71 71 pass
72 72 try:
73 73 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
74 74 except ImportError:
75 75 pass
76 76 try:
77 77 from mercurial import registrar # since 3.7 (or 37d50250b696)
78 78 dir(registrar) # forcibly load it
79 79 except ImportError:
80 80 registrar = None
81 81 try:
82 82 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
83 83 except ImportError:
84 84 pass
85 85 try:
86 86 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
87 87 except ImportError:
88 88 pass
89 89 try:
90 90 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
91 91 except ImportError:
92 92 pass
93 93
94 94
def identity(a):
    """Return *a* unchanged (no-op fallback for missing pycompat helpers)."""
    return a
97 97
try:
    from mercurial import pycompat
    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (ImportError, AttributeError):
    # "historical portability": pycompat (or one of the attributes above)
    # is missing, so we are running under an old, Python-2-only Mercurial;
    # fall back to py2 equivalents / identity shims.
    import inspect
    getargspec = inspect.getargspec
    _byteskwargs = identity
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange
117 117
# resolve a usable Queue class across Mercurial/pycompat generations
try:
    # 4.7+
    queue = pycompat.queue.Queue
except (AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (AttributeError, ImportError):
        # no pycompat at all: very old Mercurial kept its copy on util
        queue = util.queue
127 127
# resolve maketemplater across module reorganizations; None means the
# helper does not exist in this Mercurial and callers must cope
try:
    from mercurial import logcmdutil
    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        # older Mercurial kept this helper on cmdutil
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None
136 136
137 137 # for "historical portability":
138 138 # define util.safehasattr forcibly, because util.safehasattr has been
139 139 # available since 1.9.3 (or 94b200a11cf7)
# sentinel distinct from every real attribute value
_undefined = object()

def safehasattr(thing, attr):
    """Return True when `thing` has attribute `attr` (a bytes name)."""
    found = getattr(thing, _sysstr(attr), _undefined)
    return found is not _undefined

setattr(util, 'safehasattr', safehasattr)
144 144
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    # NOTE(review): os.name is a native str, so this bytes comparison can
    # only match on Python 2 (where bytes is str); on py3 the
    # perf_counter branch above is always taken first anyway.
    util.timer = time.clock
else:
    util.timer = time.time
154 154
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
                         (b'c', b'changelog', False, (b'open changelog')),
                         (b'm', b'manifest', False, (b'open manifest')),
                         (b'', b'dir', False, (b'open directory manifest')),
                     ]))
174 174
# table filled by the @command decorator below
cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a command spec like b'name|alias1|alias2' into its names."""
    names = cmd.split(b"|")
    return names
182 182
# pick the most capable "@command" decorator this Mercurial provides
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
210 210
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    # register the knobs documented in the module docstring;
    # dynamicdefault because each reader supplies its own default value
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    # registrar.configitem not available: the options are simply read
    # as unregistered config values on this Mercurial
    pass
230 230
def getlen(ui):
    """Return a length function; a constant-1 stub under perf.stub.

    The stub keeps result counts deterministic when benchmarks run in
    test mode.
    """
    stubbed = ui.configbool(b"perf", b"stub", False)
    if not stubbed:
        return len

    def fakelen(x):
        return 1
    return fakelen
235 235
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                # falsy, like plainformatter: "if fm:" tests select plain output
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)
    return functools.partial(_timer, fm, displayall=displayall), fm
301 301
def stub_timer(fm, func, setup=None, title=None):
    """Degenerate timer: run `setup` (when given) then `func`, once each.

    Substituted for _timer when perf.stub is set so the test suite stays
    fast; `fm` and `title` are accepted only for signature compatibility.
    """
    if setup is not None:
        setup()
    func()
306 306
@contextlib.contextmanager
def timeone():
    # Context manager measuring its body; on exit, appends one
    # (wall-clock, user-cpu-delta, system-cpu-delta) triple to the
    # yielded list.  The timer calls bracket the body as tightly as
    # possible so the measurement excludes manager overhead.
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    # os.times() tuple starts with (user, system, ...); diff the fields
    r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
317 317
def _timer(fm, func, setup=None, title=None, displayall=False):
    """Repeatedly time `func` and report the results through `fm`.

    `setup` (when given) runs before each iteration, outside the timed
    section.  Iteration stops once more than 3 seconds elapsed with at
    least 100 runs, or more than 10 seconds with at least 3 runs.
    The value of the last `func()` call is reported as the result.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    while True:
        if setup is not None:
            setup()
        with timeone() as item:
            r = func()
        count += 1
        results.append(item[0])
        cstop = util.timer()
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)
338 338
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark entry to the formatter `fm`.

    `timings` is a list of (wall, user, sys) triples; it is sorted in
    place.  Always reports the best run; with `displayall`, also reports
    max, average and median.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def _show(role, entry):
        # non-"best" roles get a "role." prefix on every field name
        prefix = b'' if role == b'best' else b'%s.' % role
        wall, user, system = entry[0], entry[1], entry[2]
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', wall)
        fm.write(prefix + b'comb', b' comb %f', user + system)
        fm.write(prefix + b'user', b' user %f', user)
        fm.write(prefix + b'sys', b' sys %f', system)
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    _show(b'best', timings[0])
    if displayall:
        _show(b'max', timings[-1])
        _show(b'avg', tuple(sum(col) / count for col in zip(*timings)))
        _show(b'median', timings[len(timings) // 2])
370 370
371 371 # utilities for historical portability
372 372
def getint(ui, section, name, default):
    """Read config value `section.name` as an int, or `default` if unset.

    for "historical portability":
    ui.configint has been available since 1.9 (or fa2b596db182), so the
    string is parsed by hand here.  Raises ConfigError on a non-integer
    value.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, raw))
384 384
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    # remember the current value so restore() can undo any set()
    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        # small handle closing over obj/name/origvalue
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
414 414
415 415 # utilities to examine each internal API changes
416 416
def getbranchmapsubsettable():
    """Return the branchmap 'subsettable' mapping for this Mercurial.

    for "historical portability":
    subsettable is defined in:
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    """
    for mod in (branchmap, repoview):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
432 432
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store

    for "historical portability": repo.svfs has been available since
    2.3 (or 7034365089bf); older versions exposed repo.sopener instead.
    """
    svfs = getattr(repo, 'svfs', None)
    return svfs if svfs else getattr(repo, 'sopener')
443 443
def getvfs(repo):
    """Return appropriate object to access files under .hg

    for "historical portability": repo.vfs has been available since
    2.3 (or 7034365089bf); older versions exposed repo.opener instead.
    """
    vfs = getattr(repo, 'vfs', None)
    return vfs if vfs else getattr(repo, 'opener')
454 454
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
483 483
484 484 # utilities to clear cache
485 485
def clearfilecache(obj, attrname):
    """Drop `attrname` from `obj`'s @filecache-style caches.

    When `obj` is a filtered repoview, operate on the unfiltered repo,
    since that is where the cached values live.
    """
    getunfiltered = getattr(obj, 'unfiltered', None)
    if getunfiltered is not None:
        obj = obj.unfiltered()
    # cached value sits in the instance dict; remove it if present
    if attrname in vars(obj):
        delattr(obj, attrname)
    # forget the filecache bookkeeping entry as well (no-op when absent)
    obj._filecache.pop(attrname, None)
493 493
def clearchangelog(repo):
    """Invalidate the cached changelog of `repo` (filtered or not)."""
    if repo is not repo.unfiltered():
        # a filtered repoview keeps its own changelog cache in these slots;
        # __setattr__ via object bypasses any attribute-protection proxying
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
499 499
500 500 # perf commands
501 501
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the dirstate with a matcher built from PATS"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    # list() forces the full walk; len() gives a cheap scalar result
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()
510 510
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file F at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()
518 518
@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    """benchmark computing the working directory status"""
    opts = _byteskwargs(opts)
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
530 530
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run `hg addremove` over the whole repository"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # read the old value before entering the try block, so the finally
    # clause can never hit an unbound `oldquiet` if anything raises early
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        # newer scmutil.addremove takes a uipathfn argument; detect it
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn,
                                            opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
548 548
def clearcaches(cl):
    """Clear a revlog's lookup caches, across internal API generations."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # older revlog: reset the node->rev cache to its initial state
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
557 557
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def s():
        # drop lookup caches before each run so every run does full work
        clearcaches(cl)
    def d():
        len(cl.headrevs())
    timer(d, setup=s)
    fm.end()
570 570
@command(b'perftags', formatteropts+
         [
             (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perftags(ui, repo, **opts):
    """benchmark computing the repository's tags"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def s():
        # with --clear-revlogs, also pay the revlog-reading cost each run
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()
    def t():
        return len(repo.tags())
    timer(t, setup=s)
    fm.end()
589 589
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating over all ancestors of the changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        for a in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()
600 600
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark lazy-ancestor membership tests for revisions in REVSET"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s  # the membership test itself is what we time
    timer(d)
    fm.end()
613 613
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # normalize **opts keys to bytes, like every other perf command does;
    # gettimer() and option lookups expect bytes keys
    opts = _byteskwargs(opts)
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # recreate the peer before each run; setup() is not part of the
        # timed section, so only the discovery itself is measured
        repos[1] = hg.peer(ui, opts, path)
    def d():
        setdiscovery.findcommonheads(ui, *repos)
    timer(d, setup=s)
    fm.end()
628 628
@command(b'perfbookmarks', formatteropts +
         [
             (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def s():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')
    def d():
        repo._bookmarks  # property access triggers the parse
    timer(d, setup=s)
    fm.end()
647 647
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        # time fn() over a freshly opened and parsed bundle
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # time reading the whole bundle payload in `size`-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # baseline: raw file reads with no bundle parsing at all
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        # time reading every part's payload in `size`-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # open once just to sniff the bundle format and pick the extra benches
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
765 765
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        state, chunks = bundler._generatechangelog(cl, nodes)
        # drain the generator; generation is what we are timing
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
796 796
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark computing the dirstate's directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # load the dirstate outside of the timed section
    def d():
        dirstate.hasdir(b'a')
        del dirstate._map._dirs  # drop the cache so the next run recomputes
    timer(d)
    fm.end()
808 808
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark loading the dirstate from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # warm once outside of the timed section
    def d():
        repo.dirstate.invalidate()
        b"a" in repo.dirstate  # forces a full reload after invalidate()
    timer(d)
    fm.end()
819 819
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark rebuilding the dirstate directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # load the dirstate outside of the timed section
    def d():
        repo.dirstate.hasdir(b"a")
        del repo.dirstate._map._dirs  # drop the cache for the next run
    timer(d)
    fm.end()
830 830
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark building the dirstate's case-fold file map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # load the dirstate outside of the timed section
    def d():
        dirstate._map.filefoldmap.get(b'a')
        del dirstate._map.filefoldmap  # drop the cache for the next run
    timer(d)
    fm.end()
842 842
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark building the dirstate's case-fold directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # load the dirstate outside of the timed section
    def d():
        dirstate._map.dirfoldmap.get(b'a')
        # drop both caches so each run rebuilds them from scratch
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(d)
    fm.end()
855 855
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark writing the dirstate out to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds  # load the dirstate outside of the timed section
    def d():
        ds._dirty = True  # force an actual write on every run
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()
867 867
@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    """benchmark computing the merge actions against REV"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()
886 886
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def d():
        copies.pathcopies(ctx1, ctx2)
    timer(d)
    fm.end()
898 898
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
         ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def d():
        phases = _phases
        if full:
            # with --full, also pay the cost of re-reading the phase roots
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()
917 917
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    # fetch the remote phase data once, outside the timed section
    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
973 973
@command(b'perfmanifest',[
            (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
            (b'', b'clear-disk', False, b'clear on-disk caches too'),
        ] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset; derive its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full 40-char hex node given directly
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # getstorage() appeared in newer Mercurial; fall back to the
                # historical _revlog attribute otherwise
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
1009 1009
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading a single changelog entry"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def readentry():
        repo.changelog.read(node)

    timer(readentry)
    fm.end()
1020 1020
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def resetignore():
        # drop both in-memory dirstate data and the cached ignore matcher so
        # every run rebuilds the matcher from scratch
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def buildignore():
        dirstate._ignore

    timer(buildignore, setup=resetignore, title=b"load")
    fm.end()
1037 1037
@command(b'perfindex', [
            (b'', b'rev', [], b'revision to be looked up (default tip)'),
            (b'', b'no-lookup', None, b'do not revision lookup post creation'),
         ] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # _byteskwargs() turned every key of `opts` into bytes; the previous
        # native-str 'rev' lookup (and str Abort message) broke on Python 3.
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)
    timer(d, setup=setup)
    fm.end()
1091 1091
@command(b'perfnodemap', [
            (b'', b'rev', [], b'revision to be looked up (default tip)'),
            (b'', b'clear-caches', True, b'clear revlog cache between calls'),
         ] + formatteropts)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # _byteskwargs() turned every key of `opts` into bytes; the previous
    # native-str 'clear_caches' lookup (and str Abort message) broke on
    # Python 3.
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]
    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        def setup():
            setnodeget()
    else:
        setnodeget()
        d() # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1150 1150
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup time of a bare `hg version` invocation

    Spawns the same interpreter/script (sys.argv[0]) with an empty HGRCPATH
    so config loading is minimal; output is discarded.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        if os.name != r'nt':
            # POSIX: neutralize HGRCPATH inline and discard stdout
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
        else:
            # Windows: no inline env syntax, so mutate the environment instead
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
    timer(d)
    fm.end()
1164 1164
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layer
    from the repository object. The N first revision will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    # resolve the first N nodes up front so only the parent lookup is timed
    nodes = [repo.changelog.node(i) for i in _xrange(count)]

    def fetchparents():
        for node in nodes:
            repo.changelog.parents(node)

    timer(fetchparents)
    fm.end()
1181 1188
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of a single changeset"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)

    def listfiles():
        len(repo[rev].files())

    timer(listfiles)
    fm.end()
1191 1198
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw file list straight from the changelog"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    changelog = repo.changelog

    def readfilelist():
        # entry [3] of a parsed changelog record is the file list
        len(changelog.read(rev)[3])

    timer(readfilelist)
    fm.end()
1202 1209
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision identifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def resolve():
        len(repo.lookup(rev))

    timer(resolve)
    fm.end()
1209 1216
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
         ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark applying a series of random edits to a linelog

    The edit list is generated up front from a fixed random seed, so the
    timed work is only ``linelog.replacelines`` calls and results are
    reproducible between runs.
    """
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed keeps the workload identical across invocations
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        # pick a random source range within the current file ...
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        # ... and a random replacement range of bounded size
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1243 1250
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # bind the function once so the timed closure avoids attribute lookups
    revrange = scmutil.revrange

    def evalrange():
        len(revrange(repo, specs))

    timer(evalrange)
    fm.end()
1251 1258
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark a cold node-to-rev lookup on a freshly built changelog revlog

    A private revlog instance is built over 00changelog.i and its caches are
    cleared after every lookup so each run starts cold.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(n)
        clearcaches(cl)
    timer(d)
    fm.end()
1265 1272
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark a full `hg log` run (optionally following renames)

    Output is captured in a ui buffer so terminal I/O does not pollute the
    timing.
    """
    opts = _byteskwargs(opts)
    if rev is None:
        rev=[]
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
                               copies=opts.get(b'rename')))
    ui.popbuffer()
    fm.end()
1279 1286
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def walkbackwards():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[rev]
            # reading the branch forces the changelog entry to be loaded,
            # not just the index
            ctx.branch()

    timer(walkbackwards)
    fm.end()
1294 1301
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render into a ui whose output goes to os.devnull so I/O cost is not
    # part of the measurement
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1328 1335
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
         ])
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # only merge commits are interesting: they have two parents to trace
    # copies between
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    # NOTE(review): native-str keys here vs. the bytes keys
                    # above — presumably Python-2-only code; verify under py3
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
1404 1411
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def buildauditor():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(buildauditor)
    fm.end()
1411 1418
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    def loadfncache():
        store.fncache._load()

    timer(loadfncache)
    fm.end()
1421 1428
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache within a transaction

    The fncache is marked dirty and rewritten on every run.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    # previously neither the lock nor the transaction was released when the
    # benchmark raised; use try/finally so they always are
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        try:
            tr.addbackup(b'fncache')
            def d():
                s.fncache._dirty = True
                s.fncache.write(tr)
            timer(d)
            tr.close()
        finally:
            # no-op after a successful close(); aborts on failure
            tr.release()
    finally:
        lock.release()
    fm.end()
1438 1445
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    # load once up front; only the encoding is timed
    store.fncache._load()

    def encodeall():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encodeall)
    fm.end()
1450 1457
def _bdiffworker(q, blocks, xdiff, ready, done):
    """worker loop for threaded perfbdiff runs

    Pulls text pairs from queue ``q`` and diffs them until a ``None``
    sentinel arrives, then parks on the ``ready`` condition until the main
    thread wakes all workers for the next timed run.  Exits once ``done``
    is set.  ``blocks``/``xdiff`` select which diff flavour to exercise.
    """
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()
1466 1473
def _manifestrevision(repo, mnode):
    """Return the raw text of manifest node ``mnode``.

    Works with both modern (``getstorage``) and historical (``_revlog``)
    manifest log APIs.
    """
    manifestlog = repo.manifestlog

    if util.safehasattr(manifestlog, b'getstorage'):
        storage = manifestlog.getstorage(b'')
    else:
        storage = manifestlog._revlog

    return storage.revision(mnode)
1476 1483
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with -c/-m the positional argument is the revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # gather all (old, new) text pairs up front so only diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        # start the worker pool and drain the initial sentinels before
        # timing; each timed run pushes pairs plus one None per worker,
        # then wakes everyone and waits for the queue to empty
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # shut the worker pool down: signal exit, unblock queue reads and
        # wake any worker parked on the condition
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1577 1584
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with -c/-m the positional argument is the revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # gather all (old, new) text pairs up front so only diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1643 1650
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # native-str keys on purpose: these become **kwargs to commands.diff
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    # time the diff once for each whitespace-option combination
    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = dict((options[c], b'1') for c in diffopt)
        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()
        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()
1665 1672
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # only revlog format v1 is supported; the header also carries the
    # inline-data flag needed by the parser
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    # sample nodes at fixed fractions of the index for lookup benchmarks
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1783 1790
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts back from the end
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            # walk from the last revision back toward the start, with a
            # negative stride
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in _xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1825 1832
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          # FIX: help text was a copy-paste of --stoprev's description
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
         ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count backward from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # FIX: error message used to read "invalide run count"
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    # each run produces the same sequence of revisions; merge the per-run
    # timings into one (rev, [timing, ...]) entry per revision
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # FIX: the median row was previously computed with a 70 multiplier,
        # reporting the 70th percentile under the "50%" label
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
1933 1940
class _faketr(object):
    """Transaction stub handed to revlog code during write benchmarks.

    ``addrawrevision`` records journal entries through ``tr.add``; a
    throwaway benchmark revlog never needs rollback data, so the call is
    simply ignored.
    """
    def add(s, x, y, z=None):
        # intentionally a no-op: nothing to journal for a throwaway revlog
        pass
1937 1944
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    """Replay revisions [startrev, stoprev] of `orig` into a temporary
    revlog copy, timing each `addrawrevision` call individually.

    `source` selects how the revision data is rebuilt (see
    `_getrevisionseed`). Returns a list of (rev, timing) pairs.
    """
    timings = []
    # addrawrevision requires a transaction object, but nothing needs to
    # be journaled for a throwaway revlog, so a no-op stand-in is enough
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            def updateprogress(pos):
                progress.update(pos)
            def completeprogress():
                progress.complete()
        else:
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # drop both index-level and revlog-level caches so every
                # iteration measures a cold write
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            # r[0] holds the timing recorded by the timeone context manager
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
1974 1981
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) pair to feed `addrawrevision` for `rev`.

    The revision data is reconstructed from `orig` according to `source`:
    either the full text, a delta against a chosen parent, or the delta
    actually stored in the revlog.
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    text = None
    cachedelta = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second one
        parent = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(parent), orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        parent = p1
        diff = orig.revdiff(p1, rev)
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            # prefer p2 only when its delta is strictly smaller
            if len(p2diff) < len(diff):
                parent = p2
                diff = p2diff
        cachedelta = (orig.rev(parent), diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2013 2020
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a writable copy of revlog `orig` truncated at `truncaterev`.

    The index and data files are copied into a temporary directory which
    is removed when the context exits, so the benchmark can freely append
    revisions without touching the real repository.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # index entries are fixed-size, so rev * entry-size is the
            # byte offset of `truncaterev` in the index file
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
2060 2067
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # no explicit selection: benchmark every engine that is available
        # and actually able to compress
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # return a file handle on the file actually holding the chunks
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        # one segment read per revision, opening a new fd each time
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # same as doread, but reusing a single file descriptor
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # a single segment read covering all revisions at once
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        # read + decompress, one revision at a time
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    # mutable cell so dochunkbatch can hand its result to docompress
    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2178 2185
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # with -c/-m the positional argument is actually the revision
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # slice each read segment back into per-revision compressed chunks
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        # older versions kept the helper on the revlog module
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute the intermediate results each phase benchmark consumes
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    # slicing only exists on sparse-read capable revlogs
    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2314 2321
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building
    the volatile revisions set cache on the revset execution. Volatile
    cache holds filtered and obsolete related cache."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            # materialize a changectx for every matched revision
            for ctx in repo.set(expr): pass
        else:
            # only iterate the raw revision numbers
            for r in repo.revs(expr): pass
    timer(d)
    fm.end()
2337 2344
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def makeobsbench(setname):
        """build a benchmark computing the obsolescence set `setname`"""
        def bench():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, setname)
        return bench

    wantedobs = sorted(obsolete.cachefuncs)
    if names:
        # restrict to the sets explicitly requested on the command line
        wantedobs = [n for n in wantedobs if n in names]

    for name in wantedobs:
        timer(makeobsbench(name), title=name)

    def makefilterbench(filtername):
        """build a benchmark computing the filtered-revs set `filtername`"""
        def bench():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, filtername)
        return bench

    wantedfilters = sorted(repoview.filtertable)
    if names:
        wantedfilters = [n for n in wantedfilters if n in names]

    for name in wantedfilters:
        timer(makefilterbench(name), title=name)
    fm.end()
2379 2386
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
          ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                view._branchcaches.clear()
            else:
                # only drop the cache of the filter being benchmarked
                filtered.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset is not itself still pending, so
        # subsets are always benchmarked before their supersets
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap reading and writing so only the in-memory
    # computation is measured
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        # always restore the patched read/write entry points
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2458 2465
@command(b'perfbranchmapupdate', [
     (b'', b'base', [], b'subset of revision to start from'),
     (b'', b'target', [], b'subset of revision to end with'),
     (b'', b'clear-caches', False, b'clear cache between each runs')
    ] + formatteropts)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    # build the filtered-revs sets implementing the two synthetic repo views
    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # temporarily register the two synthetic filters
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset were found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # start every run from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        # always unregister the synthetic filters
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2562 2569
@command(b'perfbranchmapload', [
     (b'f', b'filter', b'', b'Specify repoview filter'),
     # FIX: help text used to read "brachmap"
     (b'', b'list', False, b'List branchmap filter caches'),
     (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),

    ] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # --list: just report which on-disk branchmap caches exist
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        # walk up the subset chain until a cached branchmap is found
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        fromfile(repo)
    timer(bench, setup=setup)
    fm.end()
2613 2620
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)

    def parsemarkers():
        # instantiating the obsstore parses every marker from disk
        return len(obsolete.obsstore(svfs))

    timer(parsemarkers)
    fm.end()
2623 2630
@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
     (b'', b'mincost', 0, b'smallest cost of items in cache'),
     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
     (b'', b'size', 4, b'size of cache'),
     (b'', b'gets', 10000, b'number of key lookups'),
     (b'', b'sets', 10000, b'number of key sets'),
     (b'', b'mixed', 10000, b'number of mixed mode operations'),
     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    """benchmark util.lrucachedict: init, gets, inserts/sets and mixed ops

    With ``--costlimit`` set to a non-zero value, the cost-aware API
    (``insert(..., cost=...)`` with a total cost ceiling) is benchmarked
    instead of the plain dict-style API.
    """
    opts = _byteskwargs(opts)

    def doinit():
        # Raw construction cost of an (empty) lrucachedict.
        for i in _xrange(10000):
            util.lrucachedict(size)

    # Every possible per-item cost, sampled uniformly below.
    costrange = list(range(mincost, maxcost + 1))

    # ``size`` random keys: exactly fills the cache, so get mode never evicts.
    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        # NOTE: ``costs`` is the list built for set mode further down; it is
        # only looked up when this closure runs, after the list exists.  It is
        # indexed by position in ``values``, which relies on
        # len(costs) >= len(values), i.e. --sets >= --size.
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                # Entries may have been evicted to respect the cost limit.
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    # Each op is (kind, key, cost) where kind 0 = get, 1 = set; keys span
    # twice the cache size so roughly half the gets miss.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # The plain and cost-limited variants are mutually exclusive benchmarks.
    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    # Each benchmark gets a fresh timer/formatter pair so results are
    # reported under their own title.
    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2754 2761
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def bench():
        # Emit the same line 100k times so per-call overhead dominates.
        msg = b'Testing write performance\n'
        for _ in range(100000):
            ui.write(msg)

    timer(bench)
    fm.end()
2767 2774
def uisetup(ui):
    """Extension setup hook: patch openrevlog() on very old Mercurial.

    for "historical portability":
    cmdutil.openrevlog() exists but commands.debugrevlogopts doesn't only
    on Mercurial 1.9 (or a79fea6b3e77) - 3.7 (or 5606f7d0d063).  There,
    '--dir' passed to openrevlog() would fail confusingly, because that
    option has only been available since 3.5 (or 49c583ca48c4), so wrap
    it to abort with a clear message instead.
    """
    if not util.safehasattr(cmdutil, b'openrevlog'):
        return
    if util.safehasattr(commands, b'debugrevlogopts'):
        return

    def openrevlog(orig, repo, cmd, file_, opts):
        if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
            raise error.Abort(b"This version doesn't support --dir option",
                              hint=b"use 3.5 or later")
        return orig(repo, cmd, file_, opts)

    extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2782 2789
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def bench():
        # Drive one full progress run: one increment() per unit of ``total``.
        with ui.makeprogress(topic, total=total) as progress:
            for _ in pycompat.xrange(total):
                progress.increment()

    timer(bench)
    fm.end()
@@ -1,320 +1,320 b''
1 1 #require test-repo
2 2
3 3 Set vars:
4 4
5 5 $ . "$TESTDIR/helpers-testrepo.sh"
6 6 $ CONTRIBDIR="$TESTDIR/../contrib"
7 7
8 8 Prepare repo:
9 9
10 10 $ hg init
11 11
12 12 $ echo this is file a > a
13 13 $ hg add a
14 14 $ hg commit -m first
15 15
16 16 $ echo adding to file a >> a
17 17 $ hg commit -m second
18 18
19 19 $ echo adding more to file a >> a
20 20 $ hg commit -m third
21 21
22 22 $ hg up -r 0
23 23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 24 $ echo merge-this >> a
25 25 $ hg commit -m merge-able
26 26 created new head
27 27
28 28 $ hg up -r 2
29 29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30 30
31 31 perfstatus
32 32
33 33 $ cat >> $HGRCPATH << EOF
34 34 > [extensions]
35 35 > perf=$CONTRIBDIR/perf.py
36 36 > [perf]
37 37 > presleep=0
38 38 > stub=on
39 39 > parentscount=1
40 40 > EOF
41 41 $ hg help -e perf
42 42 perf extension - helper extension to measure performance
43 43
44 44 Configurations
45 45 ==============
46 46
47 47 "perf"
48 48 ------
49 49
50 50 "all-timing"
51 51 When set, additional statistic will be reported for each benchmark: best,
52 52 worst, median average. If not set only the best timing is reported
53 53 (default: off).
54 54
55 55 "presleep"
56 56 number of second to wait before any group of run (default: 1)
57 57
58 58 "stub"
59 59 When set, benchmark will only be run once, useful for testing (default:
60 60 off)
61 61
62 62 list of commands:
63 63
64 64 perfaddremove
65 65 (no help text available)
66 66 perfancestors
67 67 (no help text available)
68 68 perfancestorset
69 69 (no help text available)
70 70 perfannotate (no help text available)
71 71 perfbdiff benchmark a bdiff between revisions
72 72 perfbookmarks
73 73 benchmark parsing bookmarks from disk to memory
74 74 perfbranchmap
75 75 benchmark the update of a branchmap
76 76 perfbranchmapload
77 77 benchmark reading the branchmap
78 78 perfbranchmapupdate
79 79 benchmark branchmap update from for <base> revs to <target>
80 80 revs
81 81 perfbundleread
82 82 Benchmark reading of bundle files.
83 83 perfcca (no help text available)
84 84 perfchangegroupchangelog
85 85 Benchmark producing a changelog group for a changegroup.
86 86 perfchangeset
87 87 (no help text available)
88 88 perfctxfiles (no help text available)
89 89 perfdiffwd Profile diff of working directory changes
90 90 perfdirfoldmap
91 91 (no help text available)
92 92 perfdirs (no help text available)
93 93 perfdirstate (no help text available)
94 94 perfdirstatedirs
95 95 (no help text available)
96 96 perfdirstatefoldmap
97 97 (no help text available)
98 98 perfdirstatewrite
99 99 (no help text available)
100 100 perfdiscovery
101 101 benchmark discovery between local repo and the peer at given
102 102 path
103 103 perffncacheencode
104 104 (no help text available)
105 105 perffncacheload
106 106 (no help text available)
107 107 perffncachewrite
108 108 (no help text available)
109 109 perfheads benchmark the computation of a changelog heads
110 110 perfhelper-pathcopies
111 111 find statistic about potential parameters for the
112 112 'perftracecopies'
113 113 perfignore benchmark operation related to computing ignore
114 114 perfindex benchmark index creation time followed by a lookup
115 115 perflinelogedits
116 116 (no help text available)
117 117 perfloadmarkers
118 118 benchmark the time to parse the on-disk markers for a repo
119 119 perflog (no help text available)
120 120 perflookup (no help text available)
121 121 perflrucachedict
122 122 (no help text available)
123 123 perfmanifest benchmark the time to read a manifest from disk and return a
124 124 usable
125 125 perfmergecalculate
126 126 (no help text available)
127 127 perfmoonwalk benchmark walking the changelog backwards
128 128 perfnodelookup
129 129 (no help text available)
130 130 perfnodemap benchmark the time necessary to look up revision from a cold
131 131 nodemap
132 perfparents (no help text available)
132 perfparents benchmark the time necessary to fetch one changeset's parents.
133 133 perfpathcopies
134 134 benchmark the copy tracing logic
135 135 perfphases benchmark phasesets computation
136 136 perfphasesremote
137 137 benchmark time needed to analyse phases of the remote server
138 138 perfprogress printing of progress bars
139 139 perfrawfiles (no help text available)
140 140 perfrevlogchunks
141 141 Benchmark operations on revlog chunks.
142 142 perfrevlogindex
143 143 Benchmark operations against a revlog index.
144 144 perfrevlogrevision
145 145 Benchmark obtaining a revlog revision.
146 146 perfrevlogrevisions
147 147 Benchmark reading a series of revisions from a revlog.
148 148 perfrevlogwrite
149 149 Benchmark writing a series of revisions to a revlog.
150 150 perfrevrange (no help text available)
151 151 perfrevset benchmark the execution time of a revset
152 152 perfstartup (no help text available)
153 153 perfstatus (no help text available)
154 154 perftags (no help text available)
155 155 perftemplating
156 156 test the rendering time of a given template
157 157 perfunidiff benchmark a unified diff between revisions
158 158 perfvolatilesets
159 159 benchmark the computation of various volatile set
160 160 perfwalk (no help text available)
161 161 perfwrite microbenchmark ui.write
162 162
163 163 (use 'hg help -v perf' to show built-in aliases and global options)
164 164 $ hg perfaddremove
165 165 $ hg perfancestors
166 166 $ hg perfancestorset 2
167 167 $ hg perfannotate a
168 168 $ hg perfbdiff -c 1
169 169 $ hg perfbdiff --alldata 1
170 170 $ hg perfunidiff -c 1
171 171 $ hg perfunidiff --alldata 1
172 172 $ hg perfbookmarks
173 173 $ hg perfbranchmap
174 174 $ hg perfbranchmapload
175 175 $ hg perfbranchmapupdate --base "not tip" --target "tip"
176 176 benchmark of branchmap with 3 revisions with 1 new ones
177 177 $ hg perfcca
178 178 $ hg perfchangegroupchangelog
179 179 $ hg perfchangegroupchangelog --cgversion 01
180 180 $ hg perfchangeset 2
181 181 $ hg perfctxfiles 2
182 182 $ hg perfdiffwd
183 183 $ hg perfdirfoldmap
184 184 $ hg perfdirs
185 185 $ hg perfdirstate
186 186 $ hg perfdirstatedirs
187 187 $ hg perfdirstatefoldmap
188 188 $ hg perfdirstatewrite
189 189 #if repofncache
190 190 $ hg perffncacheencode
191 191 $ hg perffncacheload
192 192 $ hg debugrebuildfncache
193 193 fncache already up to date
194 194 $ hg perffncachewrite
195 195 $ hg debugrebuildfncache
196 196 fncache already up to date
197 197 #endif
198 198 $ hg perfheads
199 199 $ hg perfignore
200 200 $ hg perfindex
201 201 $ hg perflinelogedits -n 1
202 202 $ hg perfloadmarkers
203 203 $ hg perflog
204 204 $ hg perflookup 2
205 205 $ hg perflrucache
206 206 $ hg perfmanifest 2
207 207 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
208 208 $ hg perfmanifest -m 44fe2c8352bb
209 209 abort: manifest revision must be integer or full node
210 210 [255]
211 211 $ hg perfmergecalculate -r 3
212 212 $ hg perfmoonwalk
213 213 $ hg perfnodelookup 2
214 214 $ hg perfpathcopies 1 2
215 215 $ hg perfprogress --total 1000
216 216 $ hg perfrawfiles 2
217 217 $ hg perfrevlogindex -c
218 218 #if reporevlogstore
219 219 $ hg perfrevlogrevisions .hg/store/data/a.i
220 220 #endif
221 221 $ hg perfrevlogrevision -m 0
222 222 $ hg perfrevlogchunks -c
223 223 $ hg perfrevrange
224 224 $ hg perfrevset 'all()'
225 225 $ hg perfstartup
226 226 $ hg perfstatus
227 227 $ hg perftags
228 228 $ hg perftemplating
229 229 $ hg perfvolatilesets
230 230 $ hg perfwalk
231 231 $ hg perfparents
232 232 $ hg perfdiscovery -q .
233 233
234 234 test actual output
235 235 ------------------
236 236
237 237 normal output:
238 238
239 239 $ hg perfheads --config perf.stub=no
240 240 ! wall * comb * user * sys * (best of *) (glob)
241 241
242 242 detailed output:
243 243
244 244 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
245 245 ! wall * comb * user * sys * (best of *) (glob)
246 246 ! wall * comb * user * sys * (max of *) (glob)
247 247 ! wall * comb * user * sys * (avg of *) (glob)
248 248 ! wall * comb * user * sys * (median of *) (glob)
249 249
250 250 test json output
251 251 ----------------
252 252
253 253 normal output:
254 254
255 255 $ hg perfheads --template json --config perf.stub=no
256 256 [
257 257 {
258 258 "comb": *, (glob)
259 259 "count": *, (glob)
260 260 "sys": *, (glob)
261 261 "user": *, (glob)
262 262 "wall": * (glob)
263 263 }
264 264 ]
265 265
266 266 detailed output:
267 267
268 268 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
269 269 [
270 270 {
271 271 "avg.comb": *, (glob)
272 272 "avg.count": *, (glob)
273 273 "avg.sys": *, (glob)
274 274 "avg.user": *, (glob)
275 275 "avg.wall": *, (glob)
276 276 "comb": *, (glob)
277 277 "count": *, (glob)
278 278 "max.comb": *, (glob)
279 279 "max.count": *, (glob)
280 280 "max.sys": *, (glob)
281 281 "max.user": *, (glob)
282 282 "max.wall": *, (glob)
283 283 "median.comb": *, (glob)
284 284 "median.count": *, (glob)
285 285 "median.sys": *, (glob)
286 286 "median.user": *, (glob)
287 287 "median.wall": *, (glob)
288 288 "sys": *, (glob)
289 289 "user": *, (glob)
290 290 "wall": * (glob)
291 291 }
292 292 ]
293 293
294 294 Check perf.py for historical portability
295 295 ----------------------------------------
296 296
297 297 $ cd "$TESTDIR/.."
298 298
299 299 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
300 300 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
301 301 > "$TESTDIR"/check-perf-code.py contrib/perf.py
302 302 contrib/perf.py:\d+: (re)
303 303 > from mercurial import (
304 304 import newer module separately in try clause for early Mercurial
305 305 contrib/perf.py:\d+: (re)
306 306 > from mercurial import (
307 307 import newer module separately in try clause for early Mercurial
308 308 contrib/perf.py:\d+: (re)
309 309 > origindexpath = orig.opener.join(orig.indexfile)
310 310 use getvfs()/getsvfs() for early Mercurial
311 311 contrib/perf.py:\d+: (re)
312 312 > origdatapath = orig.opener.join(orig.datafile)
313 313 use getvfs()/getsvfs() for early Mercurial
314 314 contrib/perf.py:\d+: (re)
315 315 > vfs = vfsmod.vfs(tmpdir)
316 316 use getvfs()/getsvfs() for early Mercurial
317 317 contrib/perf.py:\d+: (re)
318 318 > vfs.options = getattr(orig.opener, 'options', None)
319 319 use getvfs()/getsvfs() for early Mercurial
320 320 [1]
General Comments 0
You need to be logged in to leave comments. Login now