##// END OF EJS Templates
perf: introduce a `perf.run-limits` options...
marmoute -
r42186:5a1e621b default
parent child Browse files
Show More
@@ -1,2817 +1,2857 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
12 12 worst, median and average. If not set, only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
16 16 number of seconds to wait before any group of runs (default: 1)
17 17
18 ``run-limits``
19 Control the number of runs each benchmark will perform. The option value
20 should be a list of `<time>-<numberofrun>` pairs. After each run the
21 conditions are considered in order with the following logic:
22
23 If the benchmark has been running for <time> seconds, and we have performed
24 <numberofrun> iterations, stop the benchmark.
25
26 The default value is: `3.0-100, 10.0-3`
27
18 28 ``stub``
19 29 When set, benchmark will only be run once, useful for testing (default: off)
20 30 '''
21 31
22 32 # "historical portability" policy of perf.py:
23 33 #
24 34 # We have to do:
25 35 # - make perf.py "loadable" with as wide Mercurial version as possible
26 36 # This doesn't mean that perf commands work correctly with that Mercurial.
27 37 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
28 38 # - make historical perf command work correctly with as wide Mercurial
29 39 # version as possible
30 40 #
31 41 # We have to do, if possible with reasonable cost:
32 42 # - make recent perf command for historical feature work correctly
33 43 # with early Mercurial
34 44 #
35 45 # We don't have to do:
36 46 # - make perf command for recent feature work correctly with early
37 47 # Mercurial
38 48
39 49 from __future__ import absolute_import
40 50 import contextlib
41 51 import functools
42 52 import gc
43 53 import os
44 54 import random
45 55 import shutil
46 56 import struct
47 57 import sys
48 58 import tempfile
49 59 import threading
50 60 import time
51 61 from mercurial import (
52 62 changegroup,
53 63 cmdutil,
54 64 commands,
55 65 copies,
56 66 error,
57 67 extensions,
58 68 hg,
59 69 mdiff,
60 70 merge,
61 71 revlog,
62 72 util,
63 73 )
64 74
65 75 # for "historical portability":
66 76 # try to import modules separately (in dict order), and ignore
67 77 # failure, because these aren't available with early Mercurial
68 78 try:
69 79 from mercurial import branchmap # since 2.5 (or bcee63733aad)
70 80 except ImportError:
71 81 pass
72 82 try:
73 83 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
74 84 except ImportError:
75 85 pass
76 86 try:
77 87 from mercurial import registrar # since 3.7 (or 37d50250b696)
78 88 dir(registrar) # forcibly load it
79 89 except ImportError:
80 90 registrar = None
81 91 try:
82 92 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
83 93 except ImportError:
84 94 pass
85 95 try:
86 96 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
87 97 except ImportError:
88 98 pass
89 99 try:
90 100 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
91 101 except ImportError:
92 102 pass
93 103
94 104
def identity(a):
    """Return *a* unchanged; no-op stand-in for missing pycompat converters."""
    return a
97 107
98 108 try:
99 109 from mercurial import pycompat
100 110 getargspec = pycompat.getargspec # added to module after 4.5
101 111 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
102 112 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
103 113 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
104 114 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
105 115 if pycompat.ispy3:
106 116 _maxint = sys.maxsize # per py3 docs for replacing maxint
107 117 else:
108 118 _maxint = sys.maxint
109 119 except (ImportError, AttributeError):
110 120 import inspect
111 121 getargspec = inspect.getargspec
112 122 _byteskwargs = identity
113 123 fsencode = identity # no py3 support
114 124 _maxint = sys.maxint # no py3 support
115 125 _sysstr = lambda x: x # no py3 support
116 126 _xrange = xrange
117 127
118 128 try:
119 129 # 4.7+
120 130 queue = pycompat.queue.Queue
121 131 except (AttributeError, ImportError):
122 132 # <4.7.
123 133 try:
124 134 queue = pycompat.queue
125 135 except (AttributeError, ImportError):
126 136 queue = util.queue
127 137
128 138 try:
129 139 from mercurial import logcmdutil
130 140 makelogtemplater = logcmdutil.maketemplater
131 141 except (AttributeError, ImportError):
132 142 try:
133 143 makelogtemplater = cmdutil.makelogtemplater
134 144 except (AttributeError, ImportError):
135 145 makelogtemplater = None
136 146
137 147 # for "historical portability":
138 148 # define util.safehasattr forcibly, because util.safehasattr has been
139 149 # available since 1.9.3 (or 94b200a11cf7)
# for "historical portability":
# util.safehasattr has only existed since 1.9.3 (or 94b200a11cf7), so
# define it here and force it onto the util module.
_undefined = object()
def safehasattr(thing, attr):
    """Return True when *thing* really has attribute *attr* (bytes name)."""
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)

# for "historical portability":
# util.timer has only existed since ae5d60bb70c9 — pick the best clock
# available on this interpreter/platform ourselves.
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    util.timer = time.clock
else:
    util.timer = time.time

# for "historical portability":
# fall back to an empty option list when formatteropts is missing;
# commands.formatteropts has existed since 3.2 (or 7a7eed5176a4), even
# though formatting itself has existed since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# fall back to a locally defined option list when debugrevlogopts is
# missing; commands.debugrevlogopts has existed since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has existed since 1.9
# (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
    (b'c', b'changelog', False, (b'open changelog')),
    (b'm', b'manifest', False, (b'open manifest')),
    (b'', b'dir', False, (b'open directory manifest')),
    ]))

cmdtable = {}
179 189 # available since 1.5 (or 6252852b4332)
180 190 def parsealiases(cmd):
181 191 return cmd.split(b"|")
182 192
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap the original cmdutil.command, because the "norepo"
        # keyword has only existed since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # roll our own "@command" annotation, because cmdutil.command has
    # only existed since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            entry = (func, list(options))
            if synopsis:
                entry = entry + (synopsis,)
            cmdtable[name] = entry
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
210 220
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    # declare every `perf.*` knob with a dynamic default so reading it
    # with an explicit fallback keeps working on all versions
    for _knob in (b'presleep', b'stub', b'parentscount', b'all-timing',
                  b'run-limits'):
        configitem(b'perf', _knob,
                   default=mercurial.configitems.dynamicdefault,
                   )
except (ImportError, AttributeError):
    # registrar/configitems predate this API; unregistered config reads
    # still work on those versions
    pass
230 243
def getlen(ui):
    """Return a length function; a constant 1 in perf.stub testing mode."""
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len
235 248
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    def _errbytes(exc):
        # exception messages are str on py3; ui.warn needs bytes
        msg = str(exc)
        if not isinstance(msg, bytes):
            msg = msg.encode('utf-8', 'replace')
        return msg

    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        # config values are bytes; splitting on a str '-' raises
        # TypeError on Python 3, so split on b'-'
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n'
                     % item))
            continue
        try:
            time_limit = float(parts[0])
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (_errbytes(e), item)))
            continue
        try:
            run_limit = int(parts[1])
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (_errbytes(e), item)))
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    t = functools.partial(_timer, fm, displayall=displayall, limits=limits)
    return t, fm
301 341
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* exactly once (perf.stub mode).

    *fm* and *title* are accepted only for signature parity with _timer;
    no timing output is produced.
    """
    if setup is not None:
        setup()
    func()
306 346
@contextlib.contextmanager
def timeone():
    """Time the enclosed block, yielding a list that receives one
    (wall, user, sys) delta tuple on exit."""
    sample = []
    times_before = os.times()
    wall_before = util.timer()
    yield sample
    wall_after = util.timer()
    times_after = os.times()
    sample.append((wall_after - wall_before,
                   times_after[0] - times_before[0],
                   times_after[1] - times_before[1]))
317 357
318 358
# list of stop condition (elapsed time, minimal run count)
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)

def _timer(fm, func, setup=None, title=None, displayall=False,
           limits=DEFAULTLIMITS):
    """Repeatedly run *func* (after *setup*) until any limit is met,
    then format the collected timings with *fm*."""
    gc.collect()
    samples = []
    start = util.timer()
    runs = 0
    while True:
        if setup is not None:
            setup()
        with timeone() as timing:
            ret = func()
        runs += 1
        samples.append(timing[0])
        # stop once any (elapsed, min-runs) pair is satisfied
        elapsed = util.timer() - start
        if any(elapsed >= tmax and runs >= nmin for tmax, nmin in limits):
            break

    formatone(fm, samples, title=title, result=ret,
              displayall=displayall)
349 389
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Emit one benchmark result through formatter *fm*.

    *timings* is a list of (wall, user, sys) tuples; it is sorted in
    place. Only the best sample is shown unless *displayall* is set.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        prefix = b'' if role == b'best' else b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    display(b'best', timings[0])
    if displayall:
        display(b'max', timings[-1])
        display(b'avg', tuple([sum(col) / count for col in zip(*timings)]))
        display(b'median', timings[len(timings) // 2])
381 421
382 422 # utilities for historical portability
383 423
def getint(ui, section, name, default):
    """Read an integer config value, with *default* when unset.

    for "historical portability": ui.configint has only existed since
    1.9 (or fa2b596db182), so parse the raw value ourselves.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, raw))
395 435
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    saved = getattr(obj, _sysstr(name))

    class attrutil(object):
        # handle returned to the caller: set() overrides, restore() undoes
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), saved)

    return attrutil()
425 465
426 466 # utilities to examine each internal API changes
427 467
def getbranchmapsubsettable():
    """Locate the branch cache 'subsettable' across historical modules.

    for "historical portability": subsettable is defined in branchmap
    since 2.9 (or 175c6fd8cacc) and in repoview since 2.5 (or
    59a9f18d4587).
    """
    for mod in (branchmap, repoview):
        table = getattr(mod, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
443 483
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf);
    # older versions expose the same thing as 'sopener'
    svfs = getattr(repo, 'svfs', None)
    return svfs if svfs else getattr(repo, 'sopener')
454 494
def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf);
    # older versions expose the same thing as 'opener'
    vfs = getattr(repo, 'vfs', None)
    return vfs if vfs else getattr(repo, 'opener')
465 505
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
494 534
495 535 # utilities to clear cache
496 536
def clearfilecache(obj, attrname):
    """Drop a @filecache'd property so the next access recomputes it."""
    getunfiltered = getattr(obj, 'unfiltered', None)
    if getunfiltered is not None:
        obj = getunfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
504 544
def clearchangelog(repo):
    """Invalidate cached changelog state on *repo* and its unfiltered view."""
    unfi = repo.unfiltered()
    if repo is not unfi:
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(unfi, 'changelog')
510 550
511 551 # perf commands
512 552
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    # time a full dirstate walk with the user-supplied patterns
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})
    def walk():
        return len(list(repo.dirstate.walk(matcher, subrepos=[],
                                           unknown=True, ignored=False)))
    timer(walk)
    fm.end()

@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    # time annotating file *f* at the working directory parent
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]
    timer(lambda: len(fctx.annotate(True)))
    fm.end()

@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    # time repo.status(), optionally asking for unknown files too
    opts = _byteskwargs(opts)
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
541 581
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    # time a dry-run addremove over the whole working directory
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # capture before entering the try block: if it were the first statement
    # inside try and something before it raised, the finally clause would
    # fail with NameError instead of restoring the quiet flag
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
559 599
def clearcaches(cl):
    """Reset a changelog's caches, papering over internal API changes."""
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
        return
    if util.safehasattr(cl, b'_nodecache'):
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None

@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def reset():
        clearcaches(cl)
    def compute():
        len(cl.headrevs())
    timer(compute, setup=reset)
    fm.end()
581 621
@command(b'perftags', formatteropts+
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perftags(ui, repo, **opts):
    # time recomputing repo.tags() from a cleared cache
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def reset():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()
    def count():
        return len(repo.tags())
    timer(count, setup=reset)
    fm.end()

@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    # time walking every ancestor of the current heads
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def walk():
        for _rev in repo.changelog.ancestors(heads):
            pass
    timer(walk)
    fm.end()

@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    # time membership tests of REVSET against a lazy ancestor set
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def probe():
        ancestors = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in ancestors
    timer(probe)
    fm.end()
624 664
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # every other perf command converts kwargs keys to bytes; this one
    # was missing the call, leaving str keys on Python 3
    opts = _byteskwargs(opts)
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # (re)open the peer for each run so connection setup is included
        repos[1] = hg.peer(ui, opts, path)
    def d():
        setdiscovery.findcommonheads(ui, *repos)
    timer(d, setup=s)
    fm.end()
639 679
@command(b'perfbookmarks', formatteropts +
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def reset():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')
    def load():
        repo._bookmarks
    timer(load, setup=reset)
    fm.end()
658 698
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        # open and parse the bundle, then hand it over to *fn*
        def go():
            with open(bundlepath, b'rb') as fp:
                bndl = exchange.readbundle(ui, fp, bundlepath)
                fn(bndl)

        return go

    def makereadnbytes(size):
        # read the parsed bundle stream in *size*-byte chunks
        def go():
            with open(bundlepath, b'rb') as fp:
                bndl = exchange.readbundle(ui, fp, bundlepath)
                while bndl.read(size):
                    pass

        return go

    def makestdioread(size):
        # raw file I/O baseline: no bundle parsing at all
        def go():
            with open(bundlepath, b'rb') as fp:
                while fp.read(size):
                    pass

        return go

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def go():
            with open(bundlepath, b'rb') as fp:
                bndl = exchange.readbundle(ui, fp, bundlepath)
                for part in bndl.iterparts():
                    while part.read(size):
                        pass

        return go

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # sniff the bundle type once to know which benchmarks apply
    with open(bundlepath, b'rb') as fp:
        bndl = exchange.readbundle(ui, fp, bundlepath)

    if isinstance(bndl, changegroup.cg1unpacker):
        benches.extend([
            (makebench(deltaiter), b'cg1 deltaiter()'),
            (makebench(iterchunks), b'cg1 getchunks()'),
            (makereadnbytes(8192), b'cg1 read(8k)'),
            (makereadnbytes(16384), b'cg1 read(16k)'),
            (makereadnbytes(32768), b'cg1 read(32k)'),
            (makereadnbytes(131072), b'cg1 read(128k)'),
        ])
    elif isinstance(bndl, bundle2.unbundle20):
        benches.extend([
            (makebench(forwardchunks), b'bundle2 forwardchunks()'),
            (makebench(iterparts), b'bundle2 iterparts()'),
            (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
            (makebench(seek), b'bundle2 part seek()'),
            (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
            (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
            (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
            (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
        ])
    elif isinstance(bndl, streamclone.streamcloneapplier):
        raise error.Abort(b'stream clone bundles not supported')
    else:
        raise error.Abort(b'unhandled bundle type: %s' % type(bndl))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
776 816
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def produce():
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(produce)

    fm.end()

@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    # time rebuilding the dirstate's directory structure
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate      # prime the dirstate before timing
    def probe():
        dirstate.hasdir(b'a')
        del dirstate._map._dirs
    timer(probe)
    fm.end()
819 859
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    # time a full dirstate reload plus one lookup
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate
    def reload():
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(reload)
    fm.end()

@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    # time rebuilding the dirstate dirs map on every iteration
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate
    def probe():
        repo.dirstate.hasdir(b"a")
        del repo.dirstate._map._dirs
    timer(probe)
    fm.end()

@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    # time rebuilding the case-folding map for files
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def rebuild():
        dirstate._map.filefoldmap.get(b'a')
        del dirstate._map.filefoldmap
    timer(rebuild)
    fm.end()

@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    # time rebuilding the case-folding map for directories
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def rebuild():
        dirstate._map.dirfoldmap.get(b'a')
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(rebuild)
    fm.end()

@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    # time flushing a (forcibly dirtied) dirstate to disk
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds
    def flush():
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(flush)
    fm.end()
878 918
@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    """benchmark computing the list of actions for a merge with REV"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # prime the working-directory stat cache so the benchmark does not pay
    # for stat'ing files on disk
    wctx.dirty()
    def run():
        # acceptremote=True avoids interactive prompts in the middle of the
        # timed run
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(run)
    fm.end()
897 937
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    source = scmutil.revsingle(repo, rev1, rev1)
    destination = scmutil.revsingle(repo, rev2, rev2)
    def tracecopies():
        copies.pathcopies(source, destination)
    timer(tracecopies)
    fm.end()
909 949
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
         ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    full = opts.get(b'full')
    cache = repo._phasecache
    def run():
        phases = cache
        if full:
            # with --full, also pay the cost of re-reading the phase data
            # from disk by dropping the filecache entry
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(run)
    fm.end()
928 968
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server

    ``dest`` defaults to the configured ``default-push``/``default`` path.
    The command prints a few statistics about the remote phase roots, then
    times ``phases.remotephasessummary`` over the discovered subset.
    """
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    # items() instead of iteritems(): works on both Python 2 and Python 3
    # (dict.iteritems does not exist on Python 3)
    for nhex, phase in remotephases.items():
        if nhex == b'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
984 1024
@command(b'perfmanifest',[
    (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
    (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if manifest_rev:
        # the argument names a manifest revision or node directly
        from mercurial.node import bin

        if len(rev) == 40:
            # full hexadecimal manifest node
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    else:
        # the argument names a changeset; use its manifest
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    def readmanifest():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(readmanifest)
    fm.end()
1020 1060
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading a single changelog entry"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()
    def readone():
        repo.changelog.read(node)
        #repo.changelog._cache = None
    timer(readone)
    fm.end()
1031 1071
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def resetignore():
        # drop any cached ignore matcher so each run rebuilds it from scratch
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def loadignore():
        dirstate._ignore

    timer(loadignore, setup=resetignore, title=b"load")
    fm.end()
1048 1088
@command(b'perfindex', [
    (b'', b'rev', [], b'revision to be looked up (default tip)'),
    (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # _byteskwargs converted all option keys to bytes; the former str
        # key (opts['rev']) raised KeyError on Python 3
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)
    timer(d, setup=setup)
    fm.end()
1102 1142
@command(b'perfnodemap', [
    (b'', b'rev', [], b'revision to be looked up (default tip)'),
    (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ] + formatteropts)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # _byteskwargs converted all option keys to bytes; the former str key
    # (opts['clear_caches']) raised KeyError on Python 3
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]
    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        def setup():
            setnodeget()
    else:
        setnodeget()
        d() # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1161 1201
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup time of a bare `hg version` invocation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    hgcmd = sys.argv[0]
    def run():
        if os.name == r'nt':
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % hgcmd)
        else:
            # blank HGRCPATH so user configuration does not skew the timing
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(hgcmd))
    timer(run)
    fm.end()
1175 1215
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layer
    from the repository object. The N first revision will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # experimental config: perf.parentscount controls how many commits we use
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodes = [repo.changelog.node(i) for i in _xrange(count)]
    def fetchparents():
        for node in nodes:
            repo.changelog.parents(node)
    timer(fetchparents)
    fm.end()
1199 1239
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the files touched by a changeset"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    def listfiles():
        len(repo[rev].files())
    timer(listfiles)
    fm.end()
1209 1249
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the file list straight from the changelog"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def readfiles():
        # index 3 of a parsed changelog entry is the list of touched files
        len(cl.read(rev)[3])
    timer(readfiles)
    fm.end()
1220 1260
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision identifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def lookuponce():
        return len(repo.lookup(rev))
    timer(lookuponce)
    fm.end()
1227 1267
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
         ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark applying a long sequence of edits to a linelog

    A reproducible pseudo-random edit script of ``--edits`` hunk
    replacements is generated up front; only replaying it through
    ``linelog.replacelines`` is timed.
    """
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: every invocation replays the exact same edit script
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        # replace lines [a1, a2) of the current text with [b1, b2) of rev
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1261 1301
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def resolve():
        return len(scmutil.revrange(repo, specs))
    timer(resolve)
    fm.end()
1269 1309
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark node lookup in a freshly parsed changelog index"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    node = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def lookuponce():
        cl.rev(node)
        # drop the revlog caches so the next run parses the index again
        clearcaches(cl)
    timer(lookuponce)
    fm.end()
1283 1323
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark a plain `hg log` run (the output is discarded)"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()  # swallow the log output during the timed runs
    def runlog():
        commands.log(ui, repo, rev=rev, date=b'', user=b'',
                     copies=opts.get(b'rename'))
    timer(runlog)
    ui.popbuffer()
    fm.end()
1297 1337
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            # reading the branch forces the changelog entry to be parsed
            # (in addition to the index access done by revs())
            repo[rev].branch()
    timer(moonwalk)
    fm.end()
1312 1352
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render into a silenced ui so output handling stays out of the timing
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    template = testedtemplate
    if template is None:
        # default template: one short line per changeset
        template = (b'{date|shortdate} [{rev}:{node|short}]'
                    b' {author|person}: {desc|firstline}\n')
    displayer = makelogtemplater(nullui, repo, template)
    def render():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(render)
    fm.end()
1346 1386
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
         ])
def perfhelperpathcopies(ui, repo, revs=None, **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # restrict ourselves to merge revisions: they are the interesting case
    # for copy tracing
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                # native str keys throughout: these feed both the
                # "%(name)s" format strings above and fm.data(**data),
                # both of which require str keys on Python 3 (the former
                # bytes keys raised KeyError/TypeError there)
                data = {
                    'source': base.hex(),
                    'destination': parent.hex(),
                    'nbrevs': len(repo.revs('%d::%d', b, p)),
                    'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
1422 1462
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark creating a case-collision auditor over the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def makeauditor():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)
    timer(makeauditor)
    fm.end()
1429 1469
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    def load():
        store.fncache._load()
    timer(load)
    fm.end()
1439 1479
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache file

    The repository lock and the backing transaction are released even if the
    timed run raises, so a failing benchmark cannot leave the repository
    locked or the transaction pending (the original code leaked both on
    error).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        try:
            tr.addbackup(b'fncache')
            def d():
                # force a rewrite even though nothing actually changed
                s.fncache._dirty = True
                s.fncache.write(tr)
            timer(d)
            tr.close()
        finally:
            # no-op after a clean close(); aborts the transaction otherwise
            tr.release()
    finally:
        lock.release()
    fm.end()
1456 1496
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently stored in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()
    def encodeall():
        for path in store.fncache.entries:
            store.encode(path)
    timer(encodeall)
    fm.end()
1468 1508
def _bdiffworker(q, blocks, xdiff, ready, done):
    """worker loop used by the threaded mode of `perfbdiff`

    Consumes text pairs from queue ``q`` and diffs each one with the
    algorithm selected by the ``xdiff``/``blocks`` flags.  A ``None`` item
    marks the end of a batch; the worker then sleeps on the ``ready``
    condition until the driver hands out the next batch or sets ``done``
    to let it exit.
    """
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            # wait until the main thread wakes us for more work (or exit)
            ready.wait()
1484 1524
def _manifestrevision(repo, mnode):
    """return the raw stored text of manifest node ``mnode``"""
    ml = repo.manifestlog
    if util.safehasattr(ml, b'getstorage'):
        # modern manifestlog API
        return ml.getstorage(b'').revision(mnode)
    # fall back to the historical private revlog attribute
    return ml._revlog.revision(mnode)
1494 1534
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        # --alldata implies reading from the changelog
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # for -c/-m the positional FILE argument is actually the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    # list of (old fulltext, new fulltext) pairs to diff during the benchmark
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            # default: diff each revision against its delta parent
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        # threaded mode: start the workers once, outside the timed function
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            # feed one batch to the workers and wait for completion; a None
            # item per thread marks the end of the batch (see _bdiffworker)
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # wake the workers one last time so they can observe `done` and exit
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1595 1635
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        # --alldata implies reading from the changelog
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # for -c/-m the positional FILE argument is actually the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    # list of (old fulltext, new fulltext) pairs to diff during the benchmark
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            # default: diff each revision against its delta parent
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1661 1701
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # single-letter flag -> keyword argument understood by commands.diff
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        # translate the flag combination into diff keyword arguments
        diffargs = dict((options[c], b'1') for c in diffopt)
        def rundiff():
            ui.pushbuffer()
            commands.diff(ui, repo, **diffargs)
            ui.popbuffer()
        label = diffopt.encode('ascii')
        title = b'diffopts: %s' % (label and (b'-' + label) or b'none')
        timer(rundiff, title=title)
    fm.end()
1683 1723
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # the first four bytes of a revlog hold the version and feature flags
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    # sample nodes at 0%, 25%, 50%, 75% and 100% of the revlog
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    # (callable, display title) pairs; each is timed independently below
    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1801 1841
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def doread():
        rl.clearcaches()

        step = opts[b'dist']
        if reverse:
            first, last, step = rllen - 1, startrev - 1, -step
        else:
            first, last = startrev, rllen

        for x in _xrange(first, last, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(x))

    timer, fm = gettimer(ui, opts)
    timer(doread)
    fm.end()
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
         ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative boundaries count from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        # one full pass over [startrev, stoprev], timing each revision
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    # results: [(rev, [timing-run-1, timing-run-2, ...]), ...]
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # BUGFIX: was `resultcount * 70 // 100`, which reported the 70th
        # percentile under the "50%" label
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
        )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
1951 1991
1952 1992 class _faketr(object):
1953 1993 def add(s, x, y, z=None):
1954 1994 return None
1955 1995
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    """Time one pass of re-adding revisions [startrev, stoprev] of `orig`.

    The revisions are replayed into a temporary, truncated copy of the
    revlog (see `_temprevlog`), timing each `addrawrevision` call
    individually.  `source` selects how the revision data is fed in (see
    `perfrevlogwrite`).  `runidx`, when set, is only used to label the
    progress bar.  Returns a list of (rev, timing) pairs, where `timing`
    is the result tuple filled in by the `timeone` context manager.
    """
    timings = []
    # addrawrevision requires a transaction object, but we do not want any
    # actual journaling work to be part of the measurement
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            def updateprogress(pos):
                progress.update(pos)
            def completeprogress():
                progress.complete()
        else:
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # drop both the index-level and revlog-level caches so every
                # iteration starts from a cold state
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            # r[0] is populated when the timeone context manager exits
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
1992 2032
def _getrevisionseed(orig, rev, tr, source):
    """Build the arguments needed to re-add revision `rev` of `orig`.

    `source` controls what data is handed to `addrawrevision`: a full
    text, a cached delta against one of the parents, or the delta already
    stored in the revlog (see `perfrevlogwrite` for the possible values).
    Returns a (positional-args, keyword-args) pair suitable for
    ``dest.addrawrevision(*args, **kwargs)``.
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        # resolve the full text; no precomputed delta is provided
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent for non-merge revisions
        parent = p2
        if p2 == nullid:
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # compute deltas against both parents and keep the shorter one
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        # reuse the delta base already chosen by the original revlog
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2031 2071
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Context manager yielding a writable copy of revlog `orig`.

    The copy lives in a temporary directory and is truncated so that it no
    longer contains revision `truncaterev` nor anything after it; the
    caller can then re-add those revisions and measure the cost.  The
    temporary directory is removed on exit.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # index entries have a fixed size; drop entry `truncaterev`
            # and everything after it
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
2078 2118
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # no explicit request: benchmark every engine that is available and
        # actually supports revlog compression
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # return a handle on the file that actually stores the chunks: the
        # index file for inline revlogs, the data file otherwise
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        # one raw segment read per revision, re-opening the file each time
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # same as doread, but reusing a single file handle
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # a single raw segment read covering all revisions at once
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        # read + decompress each revision's chunk individually
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    # mutable cell letting dochunkbatch hand the decompressed chunks over
    # to the compression benchmarks below
    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    # note: dochunkbatch must run before docompress, which consumes chunks[0]
    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2196 2236
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m, the positional argument is the revision, not a file
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # split already-read segments back into one raw (still compressed)
        # chunk per revision of the delta chain
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with the data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved between modules over time; support both locations
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    # precompute the inputs of each phase so that every benchmark below
    # measures only its own step
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2332 2372
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clean option if need to evaluate the impact of build volatile
    revisions set cache on the revset execution. Volatile cache hold filtered
    and obsolete related cache."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def dobenchmark():
        # optionally drop the volatile (filtering/obsolescence) caches so
        # every run pays the cost of rebuilding them
        if clear:
            repo.invalidatevolatilesets()
        # with --contexts, a full changectx is built for each revision
        # instead of only iterating over revision numbers
        source = repo.set(expr) if contexts else repo.revs(expr)
        for unused in source:
            pass
    timer(dobenchmark)
    fm.end()
2355 2395
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    dropobsstore = opts[b'clear_obsstore']

    def makebench(compute, name):
        """build a benchmark recomputing `name` through `compute(repo, name)`"""
        def d():
            # invalidate so each run recomputes from scratch
            repo.invalidatevolatilesets()
            if dropobsstore:
                clearfilecache(repo, b'obsstore')
            compute(repo, name)
        return d

    # obsolescence-related sets first, in sorted order
    for name in sorted(obsolete.cachefuncs):
        if names and name not in names:
            continue
        timer(makebench(obsolete.getrevs, name), title=name)

    # then the repoview filtered-revision sets
    for name in sorted(repoview.filtertable):
        if names and name not in names:
            continue
        timer(makebench(repoview.filterrevs, name), title=name)
    fm.end()
2397 2437
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
          ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap: the whole computation is redone
                view._branchcaches.clear()
            else:
                # only drop this filter level's cache; the (warmed) subset
                # caches stay available for reuse
                filtered.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset is not itself pending, so allfilters
        # ends up ordered from smaller subsets to bigger ones
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap reading/writing so that only the in-memory
    # computation is measured
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2476 2516
@command(b'perfbranchmapupdate', [
     (b'', b'base', [], b'subset of revision to start from'),
     (b'', b'target', [], b'subset of revision to end with'),
     (b'', b'clear-caches', False, b'clear cache between each runs')
    ] + formatteropts)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None] # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the update being measured covers exactly these revisions
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # temporarily register two ad-hoc repoview filters exposing exactly
        # the base and target revision sets
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset where found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

    def setup():
            # setup is re-run before each timed call; work on a fresh copy
            # of the base branchmap every time
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

    def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2580 2620
@command(b'perfbranchmapload', [
     (b'f', b'filter', b'', b'Specify repoview filter'),
     (b'', b'list', False, b'List branchmap filter caches'),
     (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),

    ] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    # --list only reports which branchmap caches exist on disk and how big
    # they are; nothing is benchmarked
    if list:
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap() # make sure we have a relevant, up to date branchmap

    # handle both recent (classmethod `fromfile`) and older (module-level
    # `read`) branchmap reading APIs
    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        # walk up the subset chain looking for a filter level that does
        # have an on-disk branchmap cache
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        fromfile(repo)
    timer(bench, setup=setup)
    fm.end()
2631 2671
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    storevfs = getsvfs(repo)
    def countmarkers():
        # instantiating the obsstore parses every marker from disk
        return len(obsolete.obsstore(storevfs))
    timer(countmarkers)
    fm.end()
2641 2681
@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
    (b'', b'mincost', 0, b'smallest cost of items in cache'),
    (b'', b'maxcost', 100, b'maximum cost of items in cache'),
    (b'', b'size', 4, b'size of cache'),
    (b'', b'gets', 10000, b'number of key lookups'),
    (b'', b'sets', 10000, b'number of key sets'),
    (b'', b'mixed', 10000, b'number of mixed mode operations'),
    (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    # Benchmark util.lrucachedict under several access patterns:
    # construction, pure gets, pure inserts/sets, and a randomized mix.
    # When --costlimit is non-zero, the cost-aware variants are timed
    # instead of the plain ones (see `benches` selection below).
    opts = _byteskwargs(opts)

    def doinit():
        # cost of constructing empty caches
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    # random key population; _maxint keeps the value range portable
    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        # NOTE: `costs` is defined further down; as a closure it is only
        # resolved when this benchmark actually runs, after `costs` exists.
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                # with a cost limit, entries may have been evicted
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        # like doinserts() but via __setitem__ instead of insert()
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0  # get
        else:
            op = 1  # set

        # keys span 0..2*size so roughly half the gets will miss
        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # cost-aware and plain benchmarks are mutually exclusive
    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
    fm.end()
2772 2812
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    # emit the same message many times so per-call overhead dominates
    def bench():
        remaining = 100000
        while remaining:
            ui.write((b'Testing write performance\n'))
            remaining -= 1
    timer(bench)
    fm.end()
2785 2825
def uisetup(ui):
    # Extension setup hook: install compatibility shims so the perf
    # commands degrade gracefully on old Mercurial versions.
    if (util.safehasattr(cmdutil, b'openrevlog') and
        not util.safehasattr(commands, b'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            # reject --dir explicitly instead of letting openrevlog()
            # fail with an obscure error on these old versions
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(b"This version doesn't support --dir option",
                                  hint=b"use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2800 2840
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    # drive a progress bar through `total` increments; the measured cost
    # is dominated by the progress machinery itself
    def bench():
        with ui.makeprogress(topic, total=total) as progress:
            for _step in pycompat.xrange(total):
                progress.increment()

    timer(bench)
    fm.end()
@@ -1,320 +1,355 b''
1 1 #require test-repo
2 2
3 3 Set vars:
4 4
5 5 $ . "$TESTDIR/helpers-testrepo.sh"
6 6 $ CONTRIBDIR="$TESTDIR/../contrib"
7 7
8 8 Prepare repo:
9 9
10 10 $ hg init
11 11
12 12 $ echo this is file a > a
13 13 $ hg add a
14 14 $ hg commit -m first
15 15
16 16 $ echo adding to file a >> a
17 17 $ hg commit -m second
18 18
19 19 $ echo adding more to file a >> a
20 20 $ hg commit -m third
21 21
22 22 $ hg up -r 0
23 23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 24 $ echo merge-this >> a
25 25 $ hg commit -m merge-able
26 26 created new head
27 27
28 28 $ hg up -r 2
29 29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30 30
31 31 perfstatus
32 32
33 33 $ cat >> $HGRCPATH << EOF
34 34 > [extensions]
35 35 > perf=$CONTRIBDIR/perf.py
36 36 > [perf]
37 37 > presleep=0
38 38 > stub=on
39 39 > parentscount=1
40 40 > EOF
41 41 $ hg help -e perf
42 42 perf extension - helper extension to measure performance
43 43
44 44 Configurations
45 45 ==============
46 46
47 47 "perf"
48 48 ------
49 49
50 50 "all-timing"
51 51 When set, additional statistic will be reported for each benchmark: best,
52 52 worst, median average. If not set only the best timing is reported
53 53 (default: off).
54 54
55 55 "presleep"
56 56 number of second to wait before any group of run (default: 1)
57 57
58 "run-limits"
59 Control the number of run each benchmark will perform. The option value
60 should be a list of '<time>-<numberofrun>' pairs. After each run the
61 condition are considered in order with the following logic:
62
63 If benchmark have been running for <time> seconds, and we have performed
64 <numberofrun> iterations, stop the benchmark,
65
66 The default value is: '3.0-100, 10.0-3'
67
58 68 "stub"
59 69 When set, benchmark will only be run once, useful for testing (default:
60 70 off)
61 71
62 72 list of commands:
63 73
64 74 perfaddremove
65 75 (no help text available)
66 76 perfancestors
67 77 (no help text available)
68 78 perfancestorset
69 79 (no help text available)
70 80 perfannotate (no help text available)
71 81 perfbdiff benchmark a bdiff between revisions
72 82 perfbookmarks
73 83 benchmark parsing bookmarks from disk to memory
74 84 perfbranchmap
75 85 benchmark the update of a branchmap
76 86 perfbranchmapload
77 87 benchmark reading the branchmap
78 88 perfbranchmapupdate
79 89 benchmark branchmap update from for <base> revs to <target>
80 90 revs
81 91 perfbundleread
82 92 Benchmark reading of bundle files.
83 93 perfcca (no help text available)
84 94 perfchangegroupchangelog
85 95 Benchmark producing a changelog group for a changegroup.
86 96 perfchangeset
87 97 (no help text available)
88 98 perfctxfiles (no help text available)
89 99 perfdiffwd Profile diff of working directory changes
90 100 perfdirfoldmap
91 101 (no help text available)
92 102 perfdirs (no help text available)
93 103 perfdirstate (no help text available)
94 104 perfdirstatedirs
95 105 (no help text available)
96 106 perfdirstatefoldmap
97 107 (no help text available)
98 108 perfdirstatewrite
99 109 (no help text available)
100 110 perfdiscovery
101 111 benchmark discovery between local repo and the peer at given
102 112 path
103 113 perffncacheencode
104 114 (no help text available)
105 115 perffncacheload
106 116 (no help text available)
107 117 perffncachewrite
108 118 (no help text available)
109 119 perfheads benchmark the computation of a changelog heads
110 120 perfhelper-pathcopies
111 121 find statistic about potential parameters for the
112 122 'perftracecopies'
113 123 perfignore benchmark operation related to computing ignore
114 124 perfindex benchmark index creation time followed by a lookup
115 125 perflinelogedits
116 126 (no help text available)
117 127 perfloadmarkers
118 128 benchmark the time to parse the on-disk markers for a repo
119 129 perflog (no help text available)
120 130 perflookup (no help text available)
121 131 perflrucachedict
122 132 (no help text available)
123 133 perfmanifest benchmark the time to read a manifest from disk and return a
124 134 usable
125 135 perfmergecalculate
126 136 (no help text available)
127 137 perfmoonwalk benchmark walking the changelog backwards
128 138 perfnodelookup
129 139 (no help text available)
130 140 perfnodemap benchmark the time necessary to look up revision from a cold
131 141 nodemap
132 142 perfparents benchmark the time necessary to fetch one changeset's parents.
133 143 perfpathcopies
134 144 benchmark the copy tracing logic
135 145 perfphases benchmark phasesets computation
136 146 perfphasesremote
137 147 benchmark time needed to analyse phases of the remote server
138 148 perfprogress printing of progress bars
139 149 perfrawfiles (no help text available)
140 150 perfrevlogchunks
141 151 Benchmark operations on revlog chunks.
142 152 perfrevlogindex
143 153 Benchmark operations against a revlog index.
144 154 perfrevlogrevision
145 155 Benchmark obtaining a revlog revision.
146 156 perfrevlogrevisions
147 157 Benchmark reading a series of revisions from a revlog.
148 158 perfrevlogwrite
149 159 Benchmark writing a series of revisions to a revlog.
150 160 perfrevrange (no help text available)
151 161 perfrevset benchmark the execution time of a revset
152 162 perfstartup (no help text available)
153 163 perfstatus (no help text available)
154 164 perftags (no help text available)
155 165 perftemplating
156 166 test the rendering time of a given template
157 167 perfunidiff benchmark a unified diff between revisions
158 168 perfvolatilesets
159 169 benchmark the computation of various volatile set
160 170 perfwalk (no help text available)
161 171 perfwrite microbenchmark ui.write
162 172
163 173 (use 'hg help -v perf' to show built-in aliases and global options)
164 174 $ hg perfaddremove
165 175 $ hg perfancestors
166 176 $ hg perfancestorset 2
167 177 $ hg perfannotate a
168 178 $ hg perfbdiff -c 1
169 179 $ hg perfbdiff --alldata 1
170 180 $ hg perfunidiff -c 1
171 181 $ hg perfunidiff --alldata 1
172 182 $ hg perfbookmarks
173 183 $ hg perfbranchmap
174 184 $ hg perfbranchmapload
175 185 $ hg perfbranchmapupdate --base "not tip" --target "tip"
176 186 benchmark of branchmap with 3 revisions with 1 new ones
177 187 $ hg perfcca
178 188 $ hg perfchangegroupchangelog
179 189 $ hg perfchangegroupchangelog --cgversion 01
180 190 $ hg perfchangeset 2
181 191 $ hg perfctxfiles 2
182 192 $ hg perfdiffwd
183 193 $ hg perfdirfoldmap
184 194 $ hg perfdirs
185 195 $ hg perfdirstate
186 196 $ hg perfdirstatedirs
187 197 $ hg perfdirstatefoldmap
188 198 $ hg perfdirstatewrite
189 199 #if repofncache
190 200 $ hg perffncacheencode
191 201 $ hg perffncacheload
192 202 $ hg debugrebuildfncache
193 203 fncache already up to date
194 204 $ hg perffncachewrite
195 205 $ hg debugrebuildfncache
196 206 fncache already up to date
197 207 #endif
198 208 $ hg perfheads
199 209 $ hg perfignore
200 210 $ hg perfindex
201 211 $ hg perflinelogedits -n 1
202 212 $ hg perfloadmarkers
203 213 $ hg perflog
204 214 $ hg perflookup 2
205 215 $ hg perflrucache
206 216 $ hg perfmanifest 2
207 217 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
208 218 $ hg perfmanifest -m 44fe2c8352bb
209 219 abort: manifest revision must be integer or full node
210 220 [255]
211 221 $ hg perfmergecalculate -r 3
212 222 $ hg perfmoonwalk
213 223 $ hg perfnodelookup 2
214 224 $ hg perfpathcopies 1 2
215 225 $ hg perfprogress --total 1000
216 226 $ hg perfrawfiles 2
217 227 $ hg perfrevlogindex -c
218 228 #if reporevlogstore
219 229 $ hg perfrevlogrevisions .hg/store/data/a.i
220 230 #endif
221 231 $ hg perfrevlogrevision -m 0
222 232 $ hg perfrevlogchunks -c
223 233 $ hg perfrevrange
224 234 $ hg perfrevset 'all()'
225 235 $ hg perfstartup
226 236 $ hg perfstatus
227 237 $ hg perftags
228 238 $ hg perftemplating
229 239 $ hg perfvolatilesets
230 240 $ hg perfwalk
231 241 $ hg perfparents
232 242 $ hg perfdiscovery -q .
233 243
244 Test run control
245 ----------------
246
247 Simple single entry
248
249 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
250 ! wall * comb * user * sys * (best of 15) (glob)
251
252 Multiple entries
253
254 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
255 ! wall * comb * user * sys * (best of 5) (glob)
256
257 error case are ignored
258
259 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
260 malformatted run limit entry, missing "-": 500
261 ! wall * comb * user * sys * (best of 5) (glob)
262 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
263 malformatted run limit entry, could not convert string to float: aaa: aaa-12
264 ! wall * comb * user * sys * (best of 5) (glob)
265 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
266 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
267 ! wall * comb * user * sys * (best of 5) (glob)
268
234 269 test actual output
235 270 ------------------
236 271
237 272 normal output:
238 273
239 274 $ hg perfheads --config perf.stub=no
240 275 ! wall * comb * user * sys * (best of *) (glob)
241 276
242 277 detailed output:
243 278
244 279 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
245 280 ! wall * comb * user * sys * (best of *) (glob)
246 281 ! wall * comb * user * sys * (max of *) (glob)
247 282 ! wall * comb * user * sys * (avg of *) (glob)
248 283 ! wall * comb * user * sys * (median of *) (glob)
249 284
250 285 test json output
251 286 ----------------
252 287
253 288 normal output:
254 289
255 290 $ hg perfheads --template json --config perf.stub=no
256 291 [
257 292 {
258 293 "comb": *, (glob)
259 294 "count": *, (glob)
260 295 "sys": *, (glob)
261 296 "user": *, (glob)
262 297 "wall": * (glob)
263 298 }
264 299 ]
265 300
266 301 detailed output:
267 302
268 303 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
269 304 [
270 305 {
271 306 "avg.comb": *, (glob)
272 307 "avg.count": *, (glob)
273 308 "avg.sys": *, (glob)
274 309 "avg.user": *, (glob)
275 310 "avg.wall": *, (glob)
276 311 "comb": *, (glob)
277 312 "count": *, (glob)
278 313 "max.comb": *, (glob)
279 314 "max.count": *, (glob)
280 315 "max.sys": *, (glob)
281 316 "max.user": *, (glob)
282 317 "max.wall": *, (glob)
283 318 "median.comb": *, (glob)
284 319 "median.count": *, (glob)
285 320 "median.sys": *, (glob)
286 321 "median.user": *, (glob)
287 322 "median.wall": *, (glob)
288 323 "sys": *, (glob)
289 324 "user": *, (glob)
290 325 "wall": * (glob)
291 326 }
292 327 ]
293 328
294 329 Check perf.py for historical portability
295 330 ----------------------------------------
296 331
297 332 $ cd "$TESTDIR/.."
298 333
299 334 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
300 335 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
301 336 > "$TESTDIR"/check-perf-code.py contrib/perf.py
302 337 contrib/perf.py:\d+: (re)
303 338 > from mercurial import (
304 339 import newer module separately in try clause for early Mercurial
305 340 contrib/perf.py:\d+: (re)
306 341 > from mercurial import (
307 342 import newer module separately in try clause for early Mercurial
308 343 contrib/perf.py:\d+: (re)
309 344 > origindexpath = orig.opener.join(orig.indexfile)
310 345 use getvfs()/getsvfs() for early Mercurial
311 346 contrib/perf.py:\d+: (re)
312 347 > origdatapath = orig.opener.join(orig.datafile)
313 348 use getvfs()/getsvfs() for early Mercurial
314 349 contrib/perf.py:\d+: (re)
315 350 > vfs = vfsmod.vfs(tmpdir)
316 351 use getvfs()/getsvfs() for early Mercurial
317 352 contrib/perf.py:\d+: (re)
318 353 > vfs.options = getattr(orig.opener, 'options', None)
319 354 use getvfs()/getsvfs() for early Mercurial
320 355 [1]
General Comments 0
You need to be logged in to leave comments. Login now