##// END OF EJS Templates
perf: add a `pre-run` option...
marmoute -
r42551:563cd9a7 default
parent child Browse files
Show More
@@ -1,2863 +1,2875
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
12 12 worst, median, and average. If not set, only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
16 16 number of seconds to wait before any group of runs (default: 1)
17 17
18 ``pre-run``
19 number of runs to perform before starting measurement.
20
18 21 ``run-limits``
19 22 Control the number of runs each benchmark will perform. The option value
20 23 should be a list of `<time>-<numberofrun>` pairs. After each run the
21 24 conditions are considered in order with the following logic:
22 25
23 26 If benchmark has been running for <time> seconds, and we have performed
24 27 <numberofrun> iterations, stop the benchmark,
25 28
26 29 The default value is: `3.0-100, 10.0-3`
27 30
28 31 ``stub``
29 32 When set, benchmarks will only be run once, useful for testing
30 33 (default: off)
31 34 '''
32 35
33 36 # "historical portability" policy of perf.py:
34 37 #
35 38 # We have to do:
36 39 # - make perf.py "loadable" with as wide Mercurial version as possible
37 40 # This doesn't mean that perf commands work correctly with that Mercurial.
38 41 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
39 42 # - make historical perf command work correctly with as wide Mercurial
40 43 # version as possible
41 44 #
42 45 # We have to do, if possible with reasonable cost:
43 46 # - make recent perf command for historical feature work correctly
44 47 # with early Mercurial
45 48 #
46 49 # We don't have to do:
47 50 # - make perf command for recent feature work correctly with early
48 51 # Mercurial
49 52
50 53 from __future__ import absolute_import
51 54 import contextlib
52 55 import functools
53 56 import gc
54 57 import os
55 58 import random
56 59 import shutil
57 60 import struct
58 61 import sys
59 62 import tempfile
60 63 import threading
61 64 import time
62 65 from mercurial import (
63 66 changegroup,
64 67 cmdutil,
65 68 commands,
66 69 copies,
67 70 error,
68 71 extensions,
69 72 hg,
70 73 mdiff,
71 74 merge,
72 75 revlog,
73 76 util,
74 77 )
75 78
76 79 # for "historical portability":
77 80 # try to import modules separately (in dict order), and ignore
78 81 # failure, because these aren't available with early Mercurial
79 82 try:
80 83 from mercurial import branchmap # since 2.5 (or bcee63733aad)
81 84 except ImportError:
82 85 pass
83 86 try:
84 87 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
85 88 except ImportError:
86 89 pass
87 90 try:
88 91 from mercurial import registrar # since 3.7 (or 37d50250b696)
89 92 dir(registrar) # forcibly load it
90 93 except ImportError:
91 94 registrar = None
92 95 try:
93 96 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
94 97 except ImportError:
95 98 pass
96 99 try:
97 100 from mercurial.utils import repoviewutil # since 5.0
98 101 except ImportError:
99 102 repoviewutil = None
100 103 try:
101 104 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
102 105 except ImportError:
103 106 pass
104 107 try:
105 108 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
106 109 except ImportError:
107 110 pass
108 111
109 112
def identity(a):
    """Return ``a`` unchanged; used below as a no-op fallback when
    pycompat helpers (byteskwargs/fsencode) are unavailable."""
    return a
112 115
113 116 try:
114 117 from mercurial import pycompat
115 118 getargspec = pycompat.getargspec # added to module after 4.5
116 119 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
117 120 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
118 121 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
119 122 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
120 123 if pycompat.ispy3:
121 124 _maxint = sys.maxsize # per py3 docs for replacing maxint
122 125 else:
123 126 _maxint = sys.maxint
124 127 except (ImportError, AttributeError):
125 128 import inspect
126 129 getargspec = inspect.getargspec
127 130 _byteskwargs = identity
128 131 fsencode = identity # no py3 support
129 132 _maxint = sys.maxint # no py3 support
130 133 _sysstr = lambda x: x # no py3 support
131 134 _xrange = xrange
132 135
133 136 try:
134 137 # 4.7+
135 138 queue = pycompat.queue.Queue
136 139 except (AttributeError, ImportError):
137 140 # <4.7.
138 141 try:
139 142 queue = pycompat.queue
140 143 except (AttributeError, ImportError):
141 144 queue = util.queue
142 145
143 146 try:
144 147 from mercurial import logcmdutil
145 148 makelogtemplater = logcmdutil.maketemplater
146 149 except (AttributeError, ImportError):
147 150 try:
148 151 makelogtemplater = cmdutil.makelogtemplater
149 152 except (AttributeError, ImportError):
150 153 makelogtemplater = None
151 154
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # unique sentinel: distinguishes "attribute missing" from any real value
def safehasattr(thing, attr):
    """Return True if ``thing`` has attribute ``attr`` (given as bytes)."""
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)
159 162
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    # NOTE(review): on py3 os.name is str, so this bytes comparison is
    # always False -- harmless there since py3 always has perf_counter
    util.timer = time.clock
else:
    util.timer = time.time

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
                         (b'c', b'changelog', False, (b'open changelog')),
                         (b'm', b'manifest', False, (b'open manifest')),
                         (b'', b'dir', False, (b'open directory manifest')),
                     ]))
189 192
190 193 cmdtable = {}
191 194
# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a command spec such as b"name|alias1|alias2" into a list."""
    return [alias for alias in cmd.split(b"|")]
197 200
# for "historical portability": pick the newest command-registration API
# available, falling back to a locally defined @command decorator
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
225 228
# for "historical portability":
# register the perf.* config knobs when the registrar API exists; on
# older Mercurial the ImportError/AttributeError is silently ignored
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'pre-run',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'run-limits',
               default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    pass
248 254
def getlen(ui):
    """Return the length function benchmarks should use.

    Under perf.stub every collection pretends to have length 1 so stub
    runs stay cheap; otherwise the builtin ``len`` is returned.
    """
    if not ui.configbool(b"perf", b"stub", False):
        return len
    return lambda x: 1
253 259
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                # falsy so callers treating fm as "real formatter?" skip it
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry is "<seconds>-<mincount>"; malformed entries are warned
    # about and skipped, and DEFAULTLIMITS applies when none parse
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n'
                     % item))
            continue
        try:
            time_limit = float(pycompat.sysstr(parts[0]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (pycompat.bytestr(e), item)))
            continue
        try:
            run_limit = int(pycompat.sysstr(parts[1]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (pycompat.bytestr(e), item)))
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # experimental config: perf.pre-run (unmeasured warm-up runs)
    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
                          prerun=prerun)
    return t, fm
346 354
def stub_timer(fm, func, setup=None, title=None):
    """Run ``func`` exactly once, untimed (used when perf.stub is set)."""
    steps = [] if setup is None else [setup]
    steps.append(func)
    for step in steps:
        step()
351 359
@contextlib.contextmanager
def timeone():
    """Time the managed block, yielding a list that receives one
    (wall, user, sys) tuple once the block exits."""
    measurements = []
    os_before = os.times()
    wall_before = util.timer()
    yield measurements
    wall_after = util.timer()
    os_after = os.times()
    measurements.append((wall_after - wall_before,
                         os_after[0] - os_before[0],
                         os_after[1] - os_before[1]))
362 370
363 371
# list of stop condition (elapsed time, minimal run count)
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)

def _timer(fm, func, setup=None, title=None, displayall=False,
           limits=DEFAULTLIMITS, prerun=0):
    """Repeatedly run ``func`` and report timings through formatter ``fm``.

    setup: optional callable invoked before every run (warm-ups included)
    limits: (elapsed-seconds, min-run-count) pairs; the benchmark stops at
        the first pair whose conditions are both met
    prerun: number of unmeasured warm-up runs performed first
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    # warm-up runs requested via perf.pre-run; untimed, but NOTE they do
    # consume part of the elapsed-time budget checked against ``limits``
    for i in _xrange(prerun):  # was ``xrange``: a NameError on Python 3
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with timeone() as item:
            r = func()
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)
394 406
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Render one benchmark's timing samples through formatter ``fm``.

    timings is a list of (wall, user, sys) tuples; it is sorted in place.
    Only the best sample is shown unless ``displayall`` is set, in which
    case max, average and median rows are emitted as well.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        # the "best" row carries unprefixed field names; others get "role."
        prefix = b'' if role == b'best' else b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    display(b'best', timings[0])
    if displayall:
        display(b'max', timings[-1])
        display(b'avg', tuple(sum(col) / count for col in zip(*timings)))
        display(b'median', timings[len(timings) // 2])
426 438
427 439 # utilities for historical portability
428 440
def getint(ui, section, name, default):
    """Read an integer config value, returning ``default`` when unset.

    Raises error.ConfigError when the value is present but not an int.
    """
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, raw))
440 452
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        # small handle capturing obj/name/origvalue in its closure
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
470 482
471 483 # utilities to examine each internal API changes
472 484
def getbranchmapsubsettable():
    """Return the ``subsettable`` mapping wherever this hg defines it."""
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
489 501
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    return svfs if svfs else getattr(repo, 'sopener')
500 512
def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    return vfs if vfs else getattr(repo, 'opener')
511 523
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    # older APIs: clearing is simply resetting the cached attribute to None
    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
540 552
541 553 # utilities to clear cache
542 554
def clearfilecache(obj, attrname):
    """Drop a cached filecache'd property so its next access reloads it."""
    # filecaches live on the unfiltered repo when obj is a repoview
    if getattr(obj, 'unfiltered', None) is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
550 562
def clearchangelog(repo):
    # drop both the filtered-repo changelog cache and the unfiltered
    # filecache entry so the next access reloads the changelog from disk
    if repo is not repo.unfiltered():
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
556 568
557 569 # perf commands
558 570
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    # benchmark a full dirstate walk (unknown files included, ignored
    # files excluded)
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()
567 579
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    # benchmark annotating file ``f`` at the working directory parent
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()
575 587
@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    # benchmark repo.status(); --unknown additionally asks status to
    # search the working copy for unknown files
    opts = _byteskwargs(opts)
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
587 599
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    # benchmark a dry-run addremove over the whole working copy
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # read the attribute BEFORE entering the try block: if this lookup
    # raised inside the try, the finally clause would reference an
    # unbound ``oldquiet``
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            # newer addremove signatures take an extra uipathfn argument
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
605 617
def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        # modern revlogs expose an explicit cache-clearing API
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # older revlogs: reset the node cache to its pristine state
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
614 626
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def setup():
        # start every run with cold revlog caches
        clearcaches(cl)
    def run():
        len(cl.headrevs())
    timer(run, setup=setup)
    fm.end()
627 639
@command(b'perftags', formatteropts+
         [
             (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perftags(ui, repo, **opts):
    # benchmark tags computation; --clear-revlogs also drops the
    # changelog/manifest filecaches before each run
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def s():
        # per-run setup: optionally reload revlogs, always drop tags cache
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()
    def t():
        return len(repo.tags())
    timer(t, setup=s)
    fm.end()
646 658
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    # benchmark walking every ancestor of the current changelog heads
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def run():
        for _rev in repo.changelog.ancestors(heads):
            pass
    timer(run)
    fm.end()
657 669
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    # benchmark membership tests of REVSET's revisions against the lazy
    # ancestor set of the current heads
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s
    timer(d)
    fm.end()
670 682
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # NOTE(review): unlike sibling commands, opts is not run through
    # _byteskwargs here -- confirm whether hg.peer/gettimer expect bytes keys
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # a fresh peer per run so discovery isn't skewed by cached state
        repos[1] = hg.peer(ui, opts, path)
    def d():
        setdiscovery.findcommonheads(ui, *repos)
    timer(d, setup=s)
    fm.end()
685 697
@command(b'perfbookmarks', formatteropts +
         [
             (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def setup():
        # drop cached state so each run re-parses bookmarks from disk
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')
    def run():
        repo._bookmarks
    timer(run, setup=setup)
    fm.end()
704 716
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # each make* helper returns a zero-argument callable re-opening the
    # bundle file on every invocation so file-open cost is included

    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # raw reads, bypassing bundle parsing entirely
        # NOTE(review): mode is bytes (b'rb') -- confirm py3 open() accepts it
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # open once to detect the bundle format and pick matching benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
822 834
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # drain the chunk generator without retaining the data
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
853 865
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    # benchmark dirstate hasdir() with a cold directory map every run
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # prime the dirstate itself outside the timer
    def run():
        dirstate.hasdir(b'a')
        del dirstate._map._dirs
    timer(run)
    fm.end()
865 877
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    # benchmark (re)loading the dirstate: invalidate, then force a lookup
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # prime once outside the timer
    def run():
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(run)
    fm.end()
876 888
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    # benchmark rebuilding the dirstate directory map via hasdir()
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate
    def d():
        repo.dirstate.hasdir(b"a")
        # drop the cached dir map so the next run rebuilds it
        del repo.dirstate._map._dirs
    timer(d)
    fm.end()
887 899
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    # benchmark building the dirstate file fold map (case-folding table)
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate._map.filefoldmap.get(b'a')
        # drop the cached map so each run rebuilds it
        del dirstate._map.filefoldmap
    timer(d)
    fm.end()
899 911
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    # benchmark building the dirstate directory fold map
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate._map.dirfoldmap.get(b'a')
        # drop both cached maps so each run rebuilds them
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(d)
    fm.end()
912 924
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    # Time writing the dirstate back to disk.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b"a" in dirstate  # make sure the dirstate is loaded
    def run():
        # flag the dirstate dirty so write() actually serializes
        dirstate._dirty = True
        dirstate.write(repo.currenttransaction())
    timer(run)
    fm.end()
924 936
@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    # Time the merge action calculation between the working copy and REV.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def run():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(run)
    fm.end()
943 955
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve both endpoints up front so only pathcopies() itself is timed
    src = scmutil.revsingle(repo, rev1, rev1)
    dst = scmutil.revsingle(repo, rev2, rev2)
    timer(lambda: copies.pathcopies(src, dst))
    fm.end()
955 967
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
         ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    phasecache = repo._phasecache
    wantfull = opts.get(b'full')
    def run():
        cache = phasecache
        if wantfull:
            # also include re-creating the phase cache object from disk
            clearfilecache(repo, b'_phasecache')
            cache = repo._phasecache
        cache.invalidate()
        cache.loadphaserevs(repo)
    timer(run)
    fm.end()
974 986
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    # local imports keep the extension loadable on Mercurial versions where
    # these symbols may not exist (see "historical portability" policy)
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # resolve DEST like `hg push` would: default-push first, then default
    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    # fetch the remote phase roots once; only the local analysis is timed
    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    # count how many remote non-public roots are known locally
    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
1030 1042
@command(b'perfmanifest',[
            (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
            (b'', b'clear-disk', False, b'clear on-disk caches too'),
         ] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV is a changeset: derive its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full 40-hex-digit manifest node
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # getstorage appeared in newer Mercurial; fall back to the
                # private _revlog attribute on older versions
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        # drop in-memory (and optionally on-disk) caches before each read
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
1066 1078
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    # Time reading a single changeset entry from the changelog.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()
    def run():
        # note: the changelog cache is intentionally left untouched here
        repo.changelog.read(node)
    timer(run)
    fm.end()
1077 1089
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def reset():
        # force the next _ignore access to recompute the ignore matcher
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def load():
        dirstate._ignore

    timer(load, setup=reset, title=b"load")
    fm.end()
1094 1106
@command(b'perfindex', [
            (b'', b'rev', [], b'revision to be looked up (default tip)'),
            (b'', b'no-lookup', None, b'do not revision lookup post creation'),
         ] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # fix: _byteskwargs converted all keys to bytes, so the str key
        # 'rev' raised KeyError on Python 3; the Abort message is now bytes
        # for consistency with the rest of the file
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)
    timer(d, setup=setup)
    fm.end()
1148 1160
@command(b'perfnodemap', [
            (b'', b'rev', [], b'revision to be looked up (default tip)'),
            (b'', b'clear-caches', True, b'clear revlog cache between calls'),
         ] + formatteropts)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # fix: _byteskwargs converted all keys to bytes, so the str key
    # 'clear_caches' raised KeyError on Python 3
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        # bytes message for consistency with the rest of the file
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]
    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        def setup():
            setnodeget()
    else:
        setnodeget()
        d() # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1207 1219
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    # Time spawning a bare `hg version` subprocess with an empty HGRCPATH.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def run():
        if os.name == r'nt':
            # no /dev/null on Windows; scrub HGRCPATH through the environment
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
        else:
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
    timer(run)
    fm.end()
1221 1233
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # experimental config: perf.parentscount controls how many commits we walk
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodes = [repo.changelog.node(i) for i in _xrange(count)]
    def run():
        for node in nodes:
            repo.changelog.parents(node)
    timer(run)
    fm.end()
1245 1257
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    # Time fetching the file list of changeset X through the context layer.
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo[rev].files()))
    fm.end()
1255 1267
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    # Time reading the touched-files list straight from the changelog entry.
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    # field 3 of changelog.read() is the list of files
    timer(lambda: len(cl.read(rev)[3]))
    fm.end()
1266 1278
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    # Time resolving a revision symbol to its node.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def run():
        return len(repo.lookup(rev))
    timer(run)
    fm.end()
1273 1285
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
         ], norepo=True)
def perflinelogedits(ui, **opts):
    # Time replaying a pseudo-random stream of edits into a fresh linelog.
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed keeps the generated edit stream identical across invocations
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        # keep the four randint() calls in this exact order so the
        # pseudo-random sequence (the benchmark input) is unchanged
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def run():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(run)
    fm.end()
1307 1319
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    # Time revset resolution for the given specs.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # hoist the function lookup so attribute access is not part of the timing
    resolve = scmutil.revrange
    timer(lambda: len(resolve(repo, specs)))
    fm.end()
1315 1327
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    # Time a cold node->rev lookup on a directly-opened changelog revlog.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    node = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def run():
        cl.rev(node)
        # wipe the revlog caches so every run starts cold
        clearcaches(cl)
    timer(run)
    fm.end()
1329 1341
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    # Time a full `hg log` run (output buffered away from the terminal).
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()
    def run():
        commands.log(ui, repo, rev=rev, date=b'', user=b'',
                     copies=opts.get(b'rename'))
    timer(run)
    ui.popbuffer()
    fm.end()
1343 1355
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def walkback():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            # branch() reads the changeset text, not just the index entry
            repo[rev].branch()
    timer(walkback)
    fm.end()
1358 1370
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render through a ui clone wired to the bit bucket so terminal I/O does
    # not pollute the measurement
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    try:
        revs = opts.get(b'rev')
        if not revs:
            revs = [b'all()']
        revs = list(scmutil.revrange(repo, revs))

        defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                           b' {author|person}: {desc|firstline}\n')
        if testedtemplate is None:
            testedtemplate = defaulttemplate
        displayer = makelogtemplater(nullui, repo, testedtemplate)
        def format():
            for r in revs:
                ctx = repo[r]
                displayer.show(ctx)
                displayer.flush(ctx)

        timer, fm = gettimer(ui, opts)
        timer(format)
        fm.end()
    finally:
        # fix: the devnull handle was previously leaked; close it explicitly
        nullui.fout.close()
1392 1404
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
         ])
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    # NOTE(review): the format strings and `header_names` below are str while
    # the `data` dict uses bytes keys -- this mix looks Python-3-unsafe
    # (`fm.data(**data)` needs str keys, `output % out` needs matching key
    # types); confirm against a py3 run before relying on this command there.
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # only merge revisions are interesting: they have two parents to trace
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
1468 1480
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    # Time constructing a case-collision auditor over the dirstate.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def run():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)
    timer(run)
    fm.end()
1475 1487
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    # Time loading the fncache from disk.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    timer(lambda: store.fncache._load())
    fm.end()
1485 1497
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    # Time serializing the fncache inside a backed-up transaction.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    lock = repo.lock()
    store.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    # back up the current fncache so the repeated writes can be rolled back
    tr.addbackup(b'fncache')
    def run():
        # mark dirty so write() actually serializes instead of short-circuiting
        store.fncache._dirty = True
        store.fncache.write(tr)
    timer(run)
    tr.close()
    lock.release()
    fm.end()
1502 1514
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    # Time path-encoding every entry currently stored in the fncache.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()
    def run():
        for path in store.fncache.entries:
            store.encode(path)
    timer(run)
    fm.end()
1514 1526
def _bdiffworker(q, blocks, xdiff, ready, done):
    # Worker loop for the threaded variant of `perfbdiff`.
    #
    # Text pairs are pulled from queue `q` and diffed until a None sentinel is
    # seen, which marks the end of one benchmark round. The worker then parks
    # on the `ready` condition until the driver wakes it for the next round,
    # or exits once the `done` event has been set.
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            # same three diff flavors as the single-threaded path in perfbdiff
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()
1530 1542
def _manifestrevision(repo, mnode):
    """Return the stored revision text for manifest node ``mnode``."""
    ml = repo.manifestlog
    # newer Mercurial exposes getstorage(); older versions only have _revlog
    if util.safehasattr(ml, b'getstorage'):
        return ml.getstorage(b'').revision(mnode)
    return ml._revlog.revision(mnode)
1540 1552
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        # --alldata works on changesets, so force the changelog
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the positional argument is the revision, not a file
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    # NOTE(review): the bound is `len(r) - 1`, so the final revision of the
    # revlog is never benchmarked -- confirm whether that is intentional
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            # default mode: diff each revision against its delta parent
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        # threaded mode: workers drain the queue; a None entry is the
        # per-round sentinel (one per worker) -- see _bdiffworker
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                # release the workers parked after the previous round
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # tear down: flag completion, feed one sentinel per worker, and wake
        # anything still waiting on the condition so the threads can exit
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1641 1653
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        # --alldata works on changesets, so force the changelog
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the positional argument is the revision, not a file
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    # NOTE(review): same as perfbdiff, the `len(r) - 1` bound excludes the
    # final revision -- confirm whether that is intentional
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            # default mode: diff each revision against its delta parent
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1707 1719
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # map single-letter flags to the diff option each one enables
    flagnames = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    for flags in ('', 'w', 'b', 'B', 'wB'):
        diffopts = dict((flagnames[flag], b'1') for flag in flags)
        def run(diffopts=diffopts):
            # buffer the diff output so printing is not part of the timing
            ui.pushbuffer()
            commands.diff(ui, repo, **diffopts)
            ui.popbuffer()
        label = flags.encode('ascii')
        title = b'diffopts: %s' % (label and (b'-' + label) or b'none')
        timer(run, title=title)
    fm.end()
1729 1741
1730 1742 @command(b'perfrevlogindex', revlogopts + formatteropts,
1731 1743 b'-c|-m|FILE')
1732 1744 def perfrevlogindex(ui, repo, file_=None, **opts):
1733 1745 """Benchmark operations against a revlog index.
1734 1746
1735 1747 This tests constructing a revlog instance, reading index data,
1736 1748 parsing index data, and performing various operations related to
1737 1749 index data.
1738 1750 """
1739 1751
1740 1752 opts = _byteskwargs(opts)
1741 1753
1742 1754 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1743 1755
1744 1756 opener = getattr(rl, 'opener') # trick linter
1745 1757 indexfile = rl.indexfile
1746 1758 data = opener.read(indexfile)
1747 1759
1748 1760 header = struct.unpack(b'>I', data[0:4])[0]
1749 1761 version = header & 0xFFFF
1750 1762 if version == 1:
1751 1763 revlogio = revlog.revlogio()
1752 1764 inline = header & (1 << 16)
1753 1765 else:
1754 1766 raise error.Abort((b'unsupported revlog version: %d') % version)
1755 1767
1756 1768 rllen = len(rl)
1757 1769
1758 1770 node0 = rl.node(0)
1759 1771 node25 = rl.node(rllen // 4)
1760 1772 node50 = rl.node(rllen // 2)
1761 1773 node75 = rl.node(rllen // 4 * 3)
1762 1774 node100 = rl.node(rllen - 1)
1763 1775
1764 1776 allrevs = range(rllen)
1765 1777 allrevsrev = list(reversed(allrevs))
1766 1778 allnodes = [rl.node(rev) for rev in range(rllen)]
1767 1779 allnodesrev = list(reversed(allnodes))
1768 1780
1769 1781 def constructor():
1770 1782 revlog.revlog(opener, indexfile)
1771 1783
1772 1784 def read():
1773 1785 with opener(indexfile) as fh:
1774 1786 fh.read()
1775 1787
1776 1788 def parseindex():
1777 1789 revlogio.parseindex(data, inline)
1778 1790
1779 1791 def getentry(revornode):
1780 1792 index = revlogio.parseindex(data, inline)[0]
1781 1793 index[revornode]
1782 1794
1783 1795 def getentries(revs, count=1):
1784 1796 index = revlogio.parseindex(data, inline)[0]
1785 1797
1786 1798 for i in range(count):
1787 1799 for rev in revs:
1788 1800 index[rev]
1789 1801
1790 1802 def resolvenode(node):
1791 1803 nodemap = revlogio.parseindex(data, inline)[1]
1792 1804 # This only works for the C code.
1793 1805 if nodemap is None:
1794 1806 return
1795 1807
1796 1808 try:
1797 1809 nodemap[node]
1798 1810 except error.RevlogError:
1799 1811 pass
1800 1812
1801 1813 def resolvenodes(nodes, count=1):
1802 1814 nodemap = revlogio.parseindex(data, inline)[1]
1803 1815 if nodemap is None:
1804 1816 return
1805 1817
1806 1818 for i in range(count):
1807 1819 for node in nodes:
1808 1820 try:
1809 1821 nodemap[node]
1810 1822 except error.RevlogError:
1811 1823 pass
1812 1824
1813 1825 benches = [
1814 1826 (constructor, b'revlog constructor'),
1815 1827 (read, b'read'),
1816 1828 (parseindex, b'create index object'),
1817 1829 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1818 1830 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1819 1831 (lambda: resolvenode(node0), b'look up node at rev 0'),
1820 1832 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1821 1833 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1822 1834 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1823 1835 (lambda: resolvenode(node100), b'look up node at tip'),
1824 1836 # 2x variation is to measure caching impact.
1825 1837 (lambda: resolvenodes(allnodes),
1826 1838 b'look up all nodes (forward)'),
1827 1839 (lambda: resolvenodes(allnodes, 2),
1828 1840 b'look up all nodes 2x (forward)'),
1829 1841 (lambda: resolvenodes(allnodesrev),
1830 1842 b'look up all nodes (reverse)'),
1831 1843 (lambda: resolvenodes(allnodesrev, 2),
1832 1844 b'look up all nodes 2x (reverse)'),
1833 1845 (lambda: getentries(allrevs),
1834 1846 b'retrieve all index entries (forward)'),
1835 1847 (lambda: getentries(allrevs, 2),
1836 1848 b'retrieve all index entries 2x (forward)'),
1837 1849 (lambda: getentries(allrevsrev),
1838 1850 b'retrieve all index entries (reverse)'),
1839 1851 (lambda: getentries(allrevsrev, 2),
1840 1852 b'retrieve all index entries 2x (reverse)'),
1841 1853 ]
1842 1854
1843 1855 for fn, title in benches:
1844 1856 timer, fm = gettimer(ui, opts)
1845 1857 timer(fn, title=title)
1846 1858 fm.end()
1847 1859
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        step = opts[b'dist']
        first = startrev
        stop = rllen
        if reverse:
            # walk from tip down to (one past) startrev
            first, stop = stop - 1, first - 1
            step = -step

        for rev in _xrange(first, stop, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(rev))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1889 1901
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
         ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    # transpose the per-run lists into one (rev, [t_run1, t_run2, ...]) list
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # was `* 70 // 100`, which reported the 70th percentile as "50%"
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
        )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
1997 2009
1998 2010 class _faketr(object):
1999 2011 def add(s, x, y, z=None):
2000 2012 return None
2001 2013
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    """Rewrite revisions startrev..stoprev of `orig` into a temporary revlog,
    timing each `addrawrevision` call.

    `source` selects how revision data is fed to the revlog (see
    `perfrevlogwrite` for the possible values). Returns a list of
    (rev, timing) pairs, where `timing` is the tuple produced by `timeone()`.
    """
    timings = []
    # addrawrevision only needs an object with an `add` method
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            def updateprogress(pos):
                progress.update(pos)
            def completeprogress():
                progress.complete()
        else:
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            # only the revision insertion itself is timed
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
2038 2050
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) to pass to `addrawrevision` for `rev`.

    Depending on `source`, the revision is provided either as a full text
    or as a cached delta against a chosen base revision (see
    `perfrevlogwrite` for the list of possible `source` values).
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        # feed the fully resolved text; the revlog computes its own delta
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second parent
        parent = p2
        if p2 == nullid:
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # pick whichever parent yields the shortest delta
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        # reuse the delta base recorded in the original revlog
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2077 2089
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Context manager yielding a copy of `orig` truncated to `truncaterev`.

    The copy lives in a temporary directory that is removed on exit, so the
    caller can freely append revisions to the yielded revlog. Inline revlogs
    are not supported.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # each index entry is orig._io.size bytes wide
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
2124 2136
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # default to every engine that is available and whose revlog
        # compressor can actually compress data
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # open a handle on the file that actually holds the chunk data
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        # read each revision segment without a reused file handle
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # read each revision segment, reusing one file handle
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # read all segments with a single call
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        # read all segments with a single call and a reused file handle
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        # read and decompress each chunk individually
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        # read and decompress all chunks in one batch
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        # recompress the chunks gathered by dochunkbatch with `compressor`
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    # the compression benchmarks rely on chunks[0] filled by dochunkbatch
    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2242 2254
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the positional argument is the revision, not a file
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # slice the read segments back into per-revision compressed chunks
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with chunk data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        # step 1: compute the delta chain
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        # step 3 (I/O part): read the segments covering the chain
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        # step 2: slice the delta chain
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        # step 3 (slicing part): rebuild raw chunks from segment data
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        # step 4: decompress each raw chunk
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        # step 5: apply the binary patches
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        # step 6: verify the fulltext hash
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        # all steps combined
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        # older versions kept the helper on the revlog module
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute the intermediate data each phase benchmark starts from
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2378 2390
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building
    the volatile revision set caches on the revset execution. Volatile
    caches hold filtered and obsolete related data."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            # also pay the cost of building a changectx per revision
            for ctx in repo.set(expr): pass
        else:
            for r in repo.revs(expr): pass
    timer(d)
    fm.end()
2401 2413
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def getobs(name):
        # benchmark function for the obsolescence set `name`
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)
        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        # restrict to the requested set names
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        # benchmark function for the repoview filter `name`
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)
        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()
2443 2455
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
          ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap so the full build is measured
                view._branchcaches.clear()
            else:
                # only drop this filter's entry; subsets stay warm
                filtered.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset is not also pending, so filters are
        # ordered from smallest subset to largest
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap reads and writes so only the in-memory
    # computation is measured
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2522 2534
@command(b'perfbranchmapupdate', [
     (b'', b'base', [], b'subset of revision to start from'),
     (b'', b'target', [], b'subset of revision to end with'),
     (b'', b'clear-caches', False, b'clear cache between each runs')
] + formatteropts)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for changes coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None] # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the revisions the benchmarked update will have to process
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # install temporary repoview filters matching the base and target
        # states; removed again in the finally block below
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset where found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # start every run from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2626 2638
@command(b'perfbranchmapload', [
     (b'f', b'filter', b'', b'Specify repoview filter'),
     (b'', b'list', False, b'List branchmap filter caches'),
     (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),

] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap

    With --list, only print the on-disk branchmap cache files and their
    sizes instead of benchmarking. When the requested filter has no cached
    branchmap, fall back along the subset chain to one that does.
    """
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # listing mode: show cached branchmap files, no benchmark
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap() # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        fromfile(repo)
    timer(bench, setup=setup)
    fm.end()
2677 2689
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    storevfs = getsvfs(repo)

    def countmarkers():
        # parsing happens inside the obsstore constructor; returning the
        # length makes the marker count show up in the benchmark result
        return len(obsolete.obsstore(storevfs))

    timer, fm = gettimer(ui)
    timer(countmarkers)
    fm.end()
2687 2699
@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
     (b'', b'mincost', 0, b'smallest cost of items in cache'),
     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
     (b'', b'size', 4, b'size of cache'),
     (b'', b'gets', 10000, b'number of key lookups'),
     (b'', b'sets', 10000, b'number of key sets'),
     (b'', b'mixed', 10000, b'number of mixed mode operations'),
     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    # Benchmark util.lrucachedict: construction, pure gets, inserts with
    # eviction, and a mixed get/set workload.  A non-zero --costlimit
    # switches to the cost-aware insert() API variants.
    # (No docstring on purpose: `hg help` output for this command is part
    # of the test-contrib-perf.t expectations.)
    opts = _byteskwargs(opts)

    def doinit():
        # raw construction cost of the cache object itself
        for i in _xrange(10000):
            util.lrucachedict(size)

    # inclusive range of random per-item costs used by the cost-mode benches
    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        # NOTE(review): closes over `costs`, which is populated further
        # down (bound at call time, so this is fine); indexing costs[i]
        # presumes sets >= size -- confirm against option defaults.
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                # the cost limit may have evicted this key
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        # same workload as doinserts() but through __setitem__
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        # (op, key, cost); keys span 0..2*size so roughly half the gets miss
        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # cost-limited and plain variants are mutually exclusive run modes
    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    # one timer/formatter per benchmark so each is reported under its title
    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2818 2830
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def bench():
        # hammer ui.write with a fixed payload, 100000 calls per run
        remaining = 100000
        while remaining:
            ui.write((b'Testing write performance\n'))
            remaining -= 1

    timer(bench)
    fm.end()
2831 2843
def uisetup(ui):
    """Extension setup hook: patch cmdutil.openrevlog on old Mercurials.

    On versions where cmdutil.openrevlog exists but
    commands.debugrevlogopts does not, silently-ignored '--dir' usage is
    turned into an explicit abort.
    """
    if (util.safehasattr(cmdutil, b'openrevlog') and
        not util.safehasattr(commands, b'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            # `orig` is the wrapped original; only reject --dir when the
            # repo object cannot actually honor it (no dirlog support)
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(b"This version doesn't support --dir option",
                                  hint=b"use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2846 2858
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def bench():
        # one increment per step, so the progress machinery fires `total`
        # times; the context manager handles completion/cleanup
        progress = ui.makeprogress(topic, total=total)
        with progress:
            for _ in pycompat.xrange(total):
                progress.increment()

    timer(bench)
    fm.end()
@@ -1,356 +1,378
1 1 #require test-repo
2 2
3 3 Set vars:
4 4
5 5 $ . "$TESTDIR/helpers-testrepo.sh"
6 6 $ CONTRIBDIR="$TESTDIR/../contrib"
7 7
8 8 Prepare repo:
9 9
10 10 $ hg init
11 11
12 12 $ echo this is file a > a
13 13 $ hg add a
14 14 $ hg commit -m first
15 15
16 16 $ echo adding to file a >> a
17 17 $ hg commit -m second
18 18
19 19 $ echo adding more to file a >> a
20 20 $ hg commit -m third
21 21
22 22 $ hg up -r 0
23 23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 24 $ echo merge-this >> a
25 25 $ hg commit -m merge-able
26 26 created new head
27 27
28 28 $ hg up -r 2
29 29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30 30
31 31 perfstatus
32 32
33 33 $ cat >> $HGRCPATH << EOF
34 34 > [extensions]
35 35 > perf=$CONTRIBDIR/perf.py
36 36 > [perf]
37 37 > presleep=0
38 38 > stub=on
39 39 > parentscount=1
40 40 > EOF
41 41 $ hg help -e perf
42 42 perf extension - helper extension to measure performance
43 43
44 44 Configurations
45 45 ==============
46 46
47 47 "perf"
48 48 ------
49 49
50 50 "all-timing"
51 51 When set, additional statistics will be reported for each benchmark: best,
52 52 worst, median average. If not set only the best timing is reported
53 53 (default: off).
54 54
55 55 "presleep"
56 56 number of second to wait before any group of runs (default: 1)
57 57
58 "pre-run"
59 number of run to perform before starting measurement.
60
58 61 "run-limits"
59 62 Control the number of runs each benchmark will perform. The option value
60 63 should be a list of '<time>-<numberofrun>' pairs. After each run the
61 64 conditions are considered in order with the following logic:
62 65
63 66 If benchmark has been running for <time> seconds, and we have performed
64 67 <numberofrun> iterations, stop the benchmark,
65 68
66 69 The default value is: '3.0-100, 10.0-3'
67 70
68 71 "stub"
69 72 When set, benchmarks will only be run once, useful for testing (default:
70 73 off)
71 74
72 75 list of commands:
73 76
74 77 perfaddremove
75 78 (no help text available)
76 79 perfancestors
77 80 (no help text available)
78 81 perfancestorset
79 82 (no help text available)
80 83 perfannotate (no help text available)
81 84 perfbdiff benchmark a bdiff between revisions
82 85 perfbookmarks
83 86 benchmark parsing bookmarks from disk to memory
84 87 perfbranchmap
85 88 benchmark the update of a branchmap
86 89 perfbranchmapload
87 90 benchmark reading the branchmap
88 91 perfbranchmapupdate
89 92 benchmark branchmap update from for <base> revs to <target>
90 93 revs
91 94 perfbundleread
92 95 Benchmark reading of bundle files.
93 96 perfcca (no help text available)
94 97 perfchangegroupchangelog
95 98 Benchmark producing a changelog group for a changegroup.
96 99 perfchangeset
97 100 (no help text available)
98 101 perfctxfiles (no help text available)
99 102 perfdiffwd Profile diff of working directory changes
100 103 perfdirfoldmap
101 104 (no help text available)
102 105 perfdirs (no help text available)
103 106 perfdirstate (no help text available)
104 107 perfdirstatedirs
105 108 (no help text available)
106 109 perfdirstatefoldmap
107 110 (no help text available)
108 111 perfdirstatewrite
109 112 (no help text available)
110 113 perfdiscovery
111 114 benchmark discovery between local repo and the peer at given
112 115 path
113 116 perffncacheencode
114 117 (no help text available)
115 118 perffncacheload
116 119 (no help text available)
117 120 perffncachewrite
118 121 (no help text available)
119 122 perfheads benchmark the computation of a changelog heads
120 123 perfhelper-pathcopies
121 124 find statistic about potential parameters for the
122 125 'perftracecopies'
123 126 perfignore benchmark operation related to computing ignore
124 127 perfindex benchmark index creation time followed by a lookup
125 128 perflinelogedits
126 129 (no help text available)
127 130 perfloadmarkers
128 131 benchmark the time to parse the on-disk markers for a repo
129 132 perflog (no help text available)
130 133 perflookup (no help text available)
131 134 perflrucachedict
132 135 (no help text available)
133 136 perfmanifest benchmark the time to read a manifest from disk and return a
134 137 usable
135 138 perfmergecalculate
136 139 (no help text available)
137 140 perfmoonwalk benchmark walking the changelog backwards
138 141 perfnodelookup
139 142 (no help text available)
140 143 perfnodemap benchmark the time necessary to look up revision from a cold
141 144 nodemap
142 145 perfparents benchmark the time necessary to fetch one changeset's parents.
143 146 perfpathcopies
144 147 benchmark the copy tracing logic
145 148 perfphases benchmark phasesets computation
146 149 perfphasesremote
147 150 benchmark time needed to analyse phases of the remote server
148 151 perfprogress printing of progress bars
149 152 perfrawfiles (no help text available)
150 153 perfrevlogchunks
151 154 Benchmark operations on revlog chunks.
152 155 perfrevlogindex
153 156 Benchmark operations against a revlog index.
154 157 perfrevlogrevision
155 158 Benchmark obtaining a revlog revision.
156 159 perfrevlogrevisions
157 160 Benchmark reading a series of revisions from a revlog.
158 161 perfrevlogwrite
159 162 Benchmark writing a series of revisions to a revlog.
160 163 perfrevrange (no help text available)
161 164 perfrevset benchmark the execution time of a revset
162 165 perfstartup (no help text available)
163 166 perfstatus (no help text available)
164 167 perftags (no help text available)
165 168 perftemplating
166 169 test the rendering time of a given template
167 170 perfunidiff benchmark a unified diff between revisions
168 171 perfvolatilesets
169 172 benchmark the computation of various volatile set
170 173 perfwalk (no help text available)
171 174 perfwrite microbenchmark ui.write
172 175
173 176 (use 'hg help -v perf' to show built-in aliases and global options)
174 177 $ hg perfaddremove
175 178 $ hg perfancestors
176 179 $ hg perfancestorset 2
177 180 $ hg perfannotate a
178 181 $ hg perfbdiff -c 1
179 182 $ hg perfbdiff --alldata 1
180 183 $ hg perfunidiff -c 1
181 184 $ hg perfunidiff --alldata 1
182 185 $ hg perfbookmarks
183 186 $ hg perfbranchmap
184 187 $ hg perfbranchmapload
185 188 $ hg perfbranchmapupdate --base "not tip" --target "tip"
186 189 benchmark of branchmap with 3 revisions with 1 new ones
187 190 $ hg perfcca
188 191 $ hg perfchangegroupchangelog
189 192 $ hg perfchangegroupchangelog --cgversion 01
190 193 $ hg perfchangeset 2
191 194 $ hg perfctxfiles 2
192 195 $ hg perfdiffwd
193 196 $ hg perfdirfoldmap
194 197 $ hg perfdirs
195 198 $ hg perfdirstate
196 199 $ hg perfdirstatedirs
197 200 $ hg perfdirstatefoldmap
198 201 $ hg perfdirstatewrite
199 202 #if repofncache
200 203 $ hg perffncacheencode
201 204 $ hg perffncacheload
202 205 $ hg debugrebuildfncache
203 206 fncache already up to date
204 207 $ hg perffncachewrite
205 208 $ hg debugrebuildfncache
206 209 fncache already up to date
207 210 #endif
208 211 $ hg perfheads
209 212 $ hg perfignore
210 213 $ hg perfindex
211 214 $ hg perflinelogedits -n 1
212 215 $ hg perfloadmarkers
213 216 $ hg perflog
214 217 $ hg perflookup 2
215 218 $ hg perflrucache
216 219 $ hg perfmanifest 2
217 220 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
218 221 $ hg perfmanifest -m 44fe2c8352bb
219 222 abort: manifest revision must be integer or full node
220 223 [255]
221 224 $ hg perfmergecalculate -r 3
222 225 $ hg perfmoonwalk
223 226 $ hg perfnodelookup 2
224 227 $ hg perfpathcopies 1 2
225 228 $ hg perfprogress --total 1000
226 229 $ hg perfrawfiles 2
227 230 $ hg perfrevlogindex -c
228 231 #if reporevlogstore
229 232 $ hg perfrevlogrevisions .hg/store/data/a.i
230 233 #endif
231 234 $ hg perfrevlogrevision -m 0
232 235 $ hg perfrevlogchunks -c
233 236 $ hg perfrevrange
234 237 $ hg perfrevset 'all()'
235 238 $ hg perfstartup
236 239 $ hg perfstatus
237 240 $ hg perftags
238 241 $ hg perftemplating
239 242 $ hg perfvolatilesets
240 243 $ hg perfwalk
241 244 $ hg perfparents
242 245 $ hg perfdiscovery -q .
243 246
244 247 Test run control
245 248 ----------------
246 249
247 250 Simple single entry
248 251
249 252 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
250 253 ! wall * comb * user * sys * (best of 15) (glob)
251 254
252 255 Multiple entries
253 256
254 257 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
255 258 ! wall * comb * user * sys * (best of 5) (glob)
256 259
257 260 error case are ignored
258 261
259 262 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
260 263 malformatted run limit entry, missing "-": 500
261 264 ! wall * comb * user * sys * (best of 5) (glob)
262 265 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
263 266 malformatted run limit entry, could not convert string to float: aaa: aaa-12 (no-py3 !)
264 267 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
265 268 ! wall * comb * user * sys * (best of 5) (glob)
266 269 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
267 270 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
268 271 ! wall * comb * user * sys * (best of 5) (glob)
269 272
270 273 test actual output
271 274 ------------------
272 275
273 276 normal output:
274 277
275 278 $ hg perfheads --config perf.stub=no
276 279 ! wall * comb * user * sys * (best of *) (glob)
277 280
278 281 detailed output:
279 282
280 283 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
281 284 ! wall * comb * user * sys * (best of *) (glob)
282 285 ! wall * comb * user * sys * (max of *) (glob)
283 286 ! wall * comb * user * sys * (avg of *) (glob)
284 287 ! wall * comb * user * sys * (median of *) (glob)
285 288
286 289 test json output
287 290 ----------------
288 291
289 292 normal output:
290 293
291 294 $ hg perfheads --template json --config perf.stub=no
292 295 [
293 296 {
294 297 "comb": *, (glob)
295 298 "count": *, (glob)
296 299 "sys": *, (glob)
297 300 "user": *, (glob)
298 301 "wall": * (glob)
299 302 }
300 303 ]
301 304
302 305 detailed output:
303 306
304 307 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
305 308 [
306 309 {
307 310 "avg.comb": *, (glob)
308 311 "avg.count": *, (glob)
309 312 "avg.sys": *, (glob)
310 313 "avg.user": *, (glob)
311 314 "avg.wall": *, (glob)
312 315 "comb": *, (glob)
313 316 "count": *, (glob)
314 317 "max.comb": *, (glob)
315 318 "max.count": *, (glob)
316 319 "max.sys": *, (glob)
317 320 "max.user": *, (glob)
318 321 "max.wall": *, (glob)
319 322 "median.comb": *, (glob)
320 323 "median.count": *, (glob)
321 324 "median.sys": *, (glob)
322 325 "median.user": *, (glob)
323 326 "median.wall": *, (glob)
324 327 "sys": *, (glob)
325 328 "user": *, (glob)
326 329 "wall": * (glob)
327 330 }
328 331 ]
329 332
333 Test pre-run feature
334 --------------------
335
336 (perf discovery has some spurious output)
337
338 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
339 ! wall * comb * user * sys * (best of 1) (glob)
340 searching for changes
341 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
342 ! wall * comb * user * sys * (best of 1) (glob)
343 searching for changes
344 searching for changes
345 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
346 ! wall * comb * user * sys * (best of 1) (glob)
347 searching for changes
348 searching for changes
349 searching for changes
350 searching for changes
351
330 352 Check perf.py for historical portability
331 353 ----------------------------------------
332 354
333 355 $ cd "$TESTDIR/.."
334 356
335 357 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
336 358 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
337 359 > "$TESTDIR"/check-perf-code.py contrib/perf.py
338 360 contrib/perf.py:\d+: (re)
339 361 > from mercurial import (
340 362 import newer module separately in try clause for early Mercurial
341 363 contrib/perf.py:\d+: (re)
342 364 > from mercurial import (
343 365 import newer module separately in try clause for early Mercurial
344 366 contrib/perf.py:\d+: (re)
345 367 > origindexpath = orig.opener.join(orig.indexfile)
346 368 use getvfs()/getsvfs() for early Mercurial
347 369 contrib/perf.py:\d+: (re)
348 370 > origdatapath = orig.opener.join(orig.datafile)
349 371 use getvfs()/getsvfs() for early Mercurial
350 372 contrib/perf.py:\d+: (re)
351 373 > vfs = vfsmod.vfs(tmpdir)
352 374 use getvfs()/getsvfs() for early Mercurial
353 375 contrib/perf.py:\d+: (re)
354 376 > vfs.options = getattr(orig.opener, 'options', None)
355 377 use getvfs()/getsvfs() for early Mercurial
356 378 [1]
General Comments 0
You need to be logged in to leave comments. Login now