##// END OF EJS Templates
perf: make perf.run-limits code work with Python 3...
Gregory Szorc -
r42230:912d82da default
parent child Browse files
Show More
@@ -1,2858 +1,2858
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
12 12     worst, median, average. If not set, only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
16 16     number of seconds to wait before any group of runs (default: 1)
17 17
18 18 ``run-limits``
19 19 Control the number of runs each benchmark will perform. The option value
20 20 should be a list of `<time>-<numberofrun>` pairs. After each run the
21 21 conditions are considered in order with the following logic:
22 22
23 23     If the benchmark has been running for <time> seconds and we have performed
24 24     <numberofrun> iterations, stop the benchmark.
25 25
26 26 The default value is: `3.0-100, 10.0-3`
27 27
28 28 ``stub``
29 29 When set, benchmarks will only be run once, useful for testing
30 30 (default: off)
31 31 '''
32 32
33 33 # "historical portability" policy of perf.py:
34 34 #
35 35 # We have to do:
36 36 # - make perf.py "loadable" with as wide Mercurial version as possible
37 37 # This doesn't mean that perf commands work correctly with that Mercurial.
38 38 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
39 39 # - make historical perf command work correctly with as wide Mercurial
40 40 # version as possible
41 41 #
42 42 # We have to do, if possible with reasonable cost:
43 43 # - make recent perf command for historical feature work correctly
44 44 # with early Mercurial
45 45 #
46 46 # We don't have to do:
47 47 # - make perf command for recent feature work correctly with early
48 48 # Mercurial
49 49
50 50 from __future__ import absolute_import
51 51 import contextlib
52 52 import functools
53 53 import gc
54 54 import os
55 55 import random
56 56 import shutil
57 57 import struct
58 58 import sys
59 59 import tempfile
60 60 import threading
61 61 import time
62 62 from mercurial import (
63 63 changegroup,
64 64 cmdutil,
65 65 commands,
66 66 copies,
67 67 error,
68 68 extensions,
69 69 hg,
70 70 mdiff,
71 71 merge,
72 72 revlog,
73 73 util,
74 74 )
75 75
76 76 # for "historical portability":
77 77 # try to import modules separately (in dict order), and ignore
78 78 # failure, because these aren't available with early Mercurial
79 79 try:
80 80 from mercurial import branchmap # since 2.5 (or bcee63733aad)
81 81 except ImportError:
82 82 pass
83 83 try:
84 84 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
85 85 except ImportError:
86 86 pass
87 87 try:
88 88 from mercurial import registrar # since 3.7 (or 37d50250b696)
89 89 dir(registrar) # forcibly load it
90 90 except ImportError:
91 91 registrar = None
92 92 try:
93 93 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
94 94 except ImportError:
95 95 pass
96 96 try:
97 97 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
98 98 except ImportError:
99 99 pass
100 100 try:
101 101 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
102 102 except ImportError:
103 103 pass
104 104
105 105
106 106 def identity(a):
107 107 return a
108 108
109 109 try:
110 110 from mercurial import pycompat
111 111 getargspec = pycompat.getargspec # added to module after 4.5
112 112 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
113 113 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
114 114 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
115 115 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
116 116 if pycompat.ispy3:
117 117 _maxint = sys.maxsize # per py3 docs for replacing maxint
118 118 else:
119 119 _maxint = sys.maxint
120 120 except (ImportError, AttributeError):
121 121 import inspect
122 122 getargspec = inspect.getargspec
123 123 _byteskwargs = identity
124 124 fsencode = identity # no py3 support
125 125 _maxint = sys.maxint # no py3 support
126 126 _sysstr = lambda x: x # no py3 support
127 127 _xrange = xrange
128 128
129 129 try:
130 130 # 4.7+
131 131 queue = pycompat.queue.Queue
132 132 except (AttributeError, ImportError):
133 133 # <4.7.
134 134 try:
135 135 queue = pycompat.queue
136 136 except (AttributeError, ImportError):
137 137 queue = util.queue
138 138
139 139 try:
140 140 from mercurial import logcmdutil
141 141 makelogtemplater = logcmdutil.maketemplater
142 142 except (AttributeError, ImportError):
143 143 try:
144 144 makelogtemplater = cmdutil.makelogtemplater
145 145 except (AttributeError, ImportError):
146 146 makelogtemplater = None
147 147
148 148 # for "historical portability":
149 149 # define util.safehasattr forcibly, because util.safehasattr has been
150 150 # available since 1.9.3 (or 94b200a11cf7)
151 151 _undefined = object()
152 152 def safehasattr(thing, attr):
153 153 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
154 154 setattr(util, 'safehasattr', safehasattr)
155 155
156 156 # for "historical portability":
157 157 # define util.timer forcibly, because util.timer has been available
158 158 # since ae5d60bb70c9
159 159 if safehasattr(time, 'perf_counter'):
160 160 util.timer = time.perf_counter
161 161 elif os.name == b'nt':
162 162 util.timer = time.clock
163 163 else:
164 164 util.timer = time.time
165 165
166 166 # for "historical portability":
167 167 # use locally defined empty option list, if formatteropts isn't
168 168 # available, because commands.formatteropts has been available since
169 169 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
170 170 # available since 2.2 (or ae5f92e154d3)
171 171 formatteropts = getattr(cmdutil, "formatteropts",
172 172 getattr(commands, "formatteropts", []))
173 173
174 174 # for "historical portability":
175 175 # use locally defined option list, if debugrevlogopts isn't available,
176 176 # because commands.debugrevlogopts has been available since 3.7 (or
177 177 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
178 178 # since 1.9 (or a79fea6b3e77).
179 179 revlogopts = getattr(cmdutil, "debugrevlogopts",
180 180 getattr(commands, "debugrevlogopts", [
181 181 (b'c', b'changelog', False, (b'open changelog')),
182 182 (b'm', b'manifest', False, (b'open manifest')),
183 183 (b'', b'dir', False, (b'open directory manifest')),
184 184 ]))
185 185
186 186 cmdtable = {}
187 187
188 188 # for "historical portability":
189 189 # define parsealiases locally, because cmdutil.parsealiases has been
190 190 # available since 1.5 (or 6252852b4332)
191 191 def parsealiases(cmd):
192 192 return cmd.split(b"|")
193 193
194 194 if safehasattr(registrar, 'command'):
195 195 command = registrar.command(cmdtable)
196 196 elif safehasattr(cmdutil, 'command'):
197 197 command = cmdutil.command(cmdtable)
198 198 if b'norepo' not in getargspec(command).args:
199 199 # for "historical portability":
200 200 # wrap original cmdutil.command, because "norepo" option has
201 201 # been available since 3.1 (or 75a96326cecb)
202 202 _command = command
203 203 def command(name, options=(), synopsis=None, norepo=False):
204 204 if norepo:
205 205 commands.norepo += b' %s' % b' '.join(parsealiases(name))
206 206 return _command(name, list(options), synopsis)
207 207 else:
208 208 # for "historical portability":
209 209 # define "@command" annotation locally, because cmdutil.command
210 210 # has been available since 1.9 (or 2daa5179e73f)
211 211 def command(name, options=(), synopsis=None, norepo=False):
212 212 def decorator(func):
213 213 if synopsis:
214 214 cmdtable[name] = func, list(options), synopsis
215 215 else:
216 216 cmdtable[name] = func, list(options)
217 217 if norepo:
218 218 commands.norepo += b' %s' % b' '.join(parsealiases(name))
219 219 return func
220 220 return decorator
221 221
222 222 try:
223 223 import mercurial.registrar
224 224 import mercurial.configitems
225 225 configtable = {}
226 226 configitem = mercurial.registrar.configitem(configtable)
227 227 configitem(b'perf', b'presleep',
228 228 default=mercurial.configitems.dynamicdefault,
229 229 )
230 230 configitem(b'perf', b'stub',
231 231 default=mercurial.configitems.dynamicdefault,
232 232 )
233 233 configitem(b'perf', b'parentscount',
234 234 default=mercurial.configitems.dynamicdefault,
235 235 )
236 236 configitem(b'perf', b'all-timing',
237 237 default=mercurial.configitems.dynamicdefault,
238 238 )
239 239 configitem(b'perf', b'run-limits',
240 240 default=mercurial.configitems.dynamicdefault,
241 241 )
242 242 except (ImportError, AttributeError):
243 243 pass
244 244
245 245 def getlen(ui):
246 246 if ui.configbool(b"perf", b"stub", False):
247 247 return lambda x: 1
248 248 return len
249 249
250 250 def gettimer(ui, opts=None):
251 251 """return a timer function and formatter: (timer, formatter)
252 252
253 253 This function exists to gather the creation of formatter in a single
254 254 place instead of duplicating it in all performance commands."""
255 255
256 256 # enforce an idle period before execution to counteract power management
257 257 # experimental config: perf.presleep
258 258 time.sleep(getint(ui, b"perf", b"presleep", 1))
259 259
260 260 if opts is None:
261 261 opts = {}
262 262 # redirect all to stderr unless buffer api is in use
263 263 if not ui._buffers:
264 264 ui = ui.copy()
265 265 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
266 266 if uifout:
267 267 # for "historical portability":
268 268 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
269 269 uifout.set(ui.ferr)
270 270
271 271 # get a formatter
272 272 uiformatter = getattr(ui, 'formatter', None)
273 273 if uiformatter:
274 274 fm = uiformatter(b'perf', opts)
275 275 else:
276 276 # for "historical portability":
277 277 # define formatter locally, because ui.formatter has been
278 278 # available since 2.2 (or ae5f92e154d3)
279 279 from mercurial import node
280 280 class defaultformatter(object):
281 281 """Minimized composition of baseformatter and plainformatter
282 282 """
283 283 def __init__(self, ui, topic, opts):
284 284 self._ui = ui
285 285 if ui.debugflag:
286 286 self.hexfunc = node.hex
287 287 else:
288 288 self.hexfunc = node.short
289 289 def __nonzero__(self):
290 290 return False
291 291 __bool__ = __nonzero__
292 292 def startitem(self):
293 293 pass
294 294 def data(self, **data):
295 295 pass
296 296 def write(self, fields, deftext, *fielddata, **opts):
297 297 self._ui.write(deftext % fielddata, **opts)
298 298 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
299 299 if cond:
300 300 self._ui.write(deftext % fielddata, **opts)
301 301 def plain(self, text, **opts):
302 302 self._ui.write(text, **opts)
303 303 def end(self):
304 304 pass
305 305 fm = defaultformatter(ui, b'perf', opts)
306 306
307 307 # stub function, runs code only once instead of in a loop
308 308 # experimental config: perf.stub
309 309 if ui.configbool(b"perf", b"stub", False):
310 310 return functools.partial(stub_timer, fm), fm
311 311
312 312 # experimental config: perf.all-timing
313 313 displayall = ui.configbool(b"perf", b"all-timing", False)
314 314
315 315 # experimental config: perf.run-limits
316 316 limitspec = ui.configlist(b"perf", b"run-limits", [])
317 317 limits = []
318 318 for item in limitspec:
319 parts = item.split('-', 1)
319 parts = item.split(b'-', 1)
320 320 if len(parts) < 2:
321 ui.warn(('malformatted run limit entry, missing "-": %s\n'
321 ui.warn((b'malformatted run limit entry, missing "-": %s\n'
322 322 % item))
323 323 continue
324 324 try:
325 time_limit = float(parts[0])
325 time_limit = float(pycompat.sysstr(parts[0]))
326 326 except ValueError as e:
327 ui.warn(('malformatted run limit entry, %s: %s\n'
328 % (e, item)))
327 ui.warn((b'malformatted run limit entry, %s: %s\n'
328 % (pycompat.bytestr(e), item)))
329 329 continue
330 330 try:
331 run_limit = int(parts[1])
331 run_limit = int(pycompat.sysstr(parts[1]))
332 332 except ValueError as e:
333 ui.warn(('malformatted run limit entry, %s: %s\n'
334 % (e, item)))
333 ui.warn((b'malformatted run limit entry, %s: %s\n'
334 % (pycompat.bytestr(e), item)))
335 335 continue
336 336 limits.append((time_limit, run_limit))
337 337 if not limits:
338 338 limits = DEFAULTLIMITS
339 339
340 340 t = functools.partial(_timer, fm, displayall=displayall, limits=limits)
341 341 return t, fm
342 342
343 343 def stub_timer(fm, func, setup=None, title=None):
344 344 if setup is not None:
345 345 setup()
346 346 func()
347 347
348 348 @contextlib.contextmanager
349 349 def timeone():
350 350 r = []
351 351 ostart = os.times()
352 352 cstart = util.timer()
353 353 yield r
354 354 cstop = util.timer()
355 355 ostop = os.times()
356 356 a, b = ostart, ostop
357 357 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
358 358
359 359
360 360 # list of stop condition (elapsed time, minimal run count)
361 361 DEFAULTLIMITS = (
362 362 (3.0, 100),
363 363 (10.0, 3),
364 364 )
365 365
366 366 def _timer(fm, func, setup=None, title=None, displayall=False,
367 367 limits=DEFAULTLIMITS):
368 368 gc.collect()
369 369 results = []
370 370 begin = util.timer()
371 371 count = 0
372 372 keepgoing = True
373 373 while keepgoing:
374 374 if setup is not None:
375 375 setup()
376 376 with timeone() as item:
377 377 r = func()
378 378 count += 1
379 379 results.append(item[0])
380 380 cstop = util.timer()
381 381 # Look for a stop condition.
382 382 elapsed = cstop - begin
383 383 for t, mincount in limits:
384 384 if elapsed >= t and count >= mincount:
385 385 keepgoing = False
386 386 break
387 387
388 388 formatone(fm, results, title=title, result=r,
389 389 displayall=displayall)
390 390
391 391 def formatone(fm, timings, title=None, result=None, displayall=False):
392 392
393 393 count = len(timings)
394 394
395 395 fm.startitem()
396 396
397 397 if title:
398 398 fm.write(b'title', b'! %s\n', title)
399 399 if result:
400 400 fm.write(b'result', b'! result: %s\n', result)
401 401 def display(role, entry):
402 402 prefix = b''
403 403 if role != b'best':
404 404 prefix = b'%s.' % role
405 405 fm.plain(b'!')
406 406 fm.write(prefix + b'wall', b' wall %f', entry[0])
407 407 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
408 408 fm.write(prefix + b'user', b' user %f', entry[1])
409 409 fm.write(prefix + b'sys', b' sys %f', entry[2])
410 410 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
411 411 fm.plain(b'\n')
412 412 timings.sort()
413 413 min_val = timings[0]
414 414 display(b'best', min_val)
415 415 if displayall:
416 416 max_val = timings[-1]
417 417 display(b'max', max_val)
418 418 avg = tuple([sum(x) / count for x in zip(*timings)])
419 419 display(b'avg', avg)
420 420 median = timings[len(timings) // 2]
421 421 display(b'median', median)
422 422
423 423 # utilities for historical portability
424 424
425 425 def getint(ui, section, name, default):
426 426 # for "historical portability":
427 427 # ui.configint has been available since 1.9 (or fa2b596db182)
428 428 v = ui.config(section, name, None)
429 429 if v is None:
430 430 return default
431 431 try:
432 432 return int(v)
433 433 except ValueError:
434 434 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
435 435 % (section, name, v))
436 436
437 437 def safeattrsetter(obj, name, ignoremissing=False):
438 438 """Ensure that 'obj' has 'name' attribute before subsequent setattr
439 439
440 440 This function is aborted, if 'obj' doesn't have 'name' attribute
441 441 at runtime. This avoids overlooking removal of an attribute, which
442 442 breaks assumption of performance measurement, in the future.
443 443
444 444 This function returns the object to (1) assign a new value, and
445 445 (2) restore an original value to the attribute.
446 446
447 447 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
448 448 abortion, and this function returns None. This is useful to
449 449 examine an attribute, which isn't ensured in all Mercurial
450 450 versions.
451 451 """
452 452 if not util.safehasattr(obj, name):
453 453 if ignoremissing:
454 454 return None
455 455 raise error.Abort((b"missing attribute %s of %s might break assumption"
456 456 b" of performance measurement") % (name, obj))
457 457
458 458 origvalue = getattr(obj, _sysstr(name))
459 459 class attrutil(object):
460 460 def set(self, newvalue):
461 461 setattr(obj, _sysstr(name), newvalue)
462 462 def restore(self):
463 463 setattr(obj, _sysstr(name), origvalue)
464 464
465 465 return attrutil()
466 466
467 467 # utilities to examine each internal API changes
468 468
469 469 def getbranchmapsubsettable():
470 470 # for "historical portability":
471 471 # subsettable is defined in:
472 472 # - branchmap since 2.9 (or 175c6fd8cacc)
473 473 # - repoview since 2.5 (or 59a9f18d4587)
474 474 for mod in (branchmap, repoview):
475 475 subsettable = getattr(mod, 'subsettable', None)
476 476 if subsettable:
477 477 return subsettable
478 478
479 479 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
480 480 # branchmap and repoview modules exist, but subsettable attribute
481 481 # doesn't)
482 482 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
483 483 hint=b"use 2.5 or later")
484 484
485 485 def getsvfs(repo):
486 486 """Return appropriate object to access files under .hg/store
487 487 """
488 488 # for "historical portability":
489 489 # repo.svfs has been available since 2.3 (or 7034365089bf)
490 490 svfs = getattr(repo, 'svfs', None)
491 491 if svfs:
492 492 return svfs
493 493 else:
494 494 return getattr(repo, 'sopener')
495 495
496 496 def getvfs(repo):
497 497 """Return appropriate object to access files under .hg
498 498 """
499 499 # for "historical portability":
500 500 # repo.vfs has been available since 2.3 (or 7034365089bf)
501 501 vfs = getattr(repo, 'vfs', None)
502 502 if vfs:
503 503 return vfs
504 504 else:
505 505 return getattr(repo, 'opener')
506 506
507 507 def repocleartagscachefunc(repo):
508 508 """Return the function to clear tags cache according to repo internal API
509 509 """
510 510 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
511 511 # in this case, setattr(repo, '_tagscache', None) or so isn't
512 512 # correct way to clear tags cache, because existing code paths
513 513 # expect _tagscache to be a structured object.
514 514 def clearcache():
515 515 # _tagscache has been filteredpropertycache since 2.5 (or
516 516 # 98c867ac1330), and delattr() can't work in such case
517 517 if b'_tagscache' in vars(repo):
518 518 del repo.__dict__[b'_tagscache']
519 519 return clearcache
520 520
521 521 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
522 522 if repotags: # since 1.4 (or 5614a628d173)
523 523 return lambda : repotags.set(None)
524 524
525 525 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
526 526 if repotagscache: # since 0.6 (or d7df759d0e97)
527 527 return lambda : repotagscache.set(None)
528 528
529 529 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
530 530 # this point, but it isn't so problematic, because:
531 531 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
532 532 # in perftags() causes failure soon
533 533 # - perf.py itself has been available since 1.1 (or eb240755386d)
534 534 raise error.Abort((b"tags API of this hg command is unknown"))
535 535
536 536 # utilities to clear cache
537 537
538 538 def clearfilecache(obj, attrname):
539 539 unfiltered = getattr(obj, 'unfiltered', None)
540 540 if unfiltered is not None:
541 541 obj = obj.unfiltered()
542 542 if attrname in vars(obj):
543 543 delattr(obj, attrname)
544 544 obj._filecache.pop(attrname, None)
545 545
546 546 def clearchangelog(repo):
547 547 if repo is not repo.unfiltered():
548 548 object.__setattr__(repo, r'_clcachekey', None)
549 549 object.__setattr__(repo, r'_clcache', None)
550 550 clearfilecache(repo.unfiltered(), 'changelog')
551 551
552 552 # perf commands
553 553
554 554 @command(b'perfwalk', formatteropts)
555 555 def perfwalk(ui, repo, *pats, **opts):
556 556 opts = _byteskwargs(opts)
557 557 timer, fm = gettimer(ui, opts)
558 558 m = scmutil.match(repo[None], pats, {})
559 559 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
560 560 ignored=False))))
561 561 fm.end()
562 562
563 563 @command(b'perfannotate', formatteropts)
564 564 def perfannotate(ui, repo, f, **opts):
565 565 opts = _byteskwargs(opts)
566 566 timer, fm = gettimer(ui, opts)
567 567 fc = repo[b'.'][f]
568 568 timer(lambda: len(fc.annotate(True)))
569 569 fm.end()
570 570
571 571 @command(b'perfstatus',
572 572 [(b'u', b'unknown', False,
573 573 b'ask status to look for unknown files')] + formatteropts)
574 574 def perfstatus(ui, repo, **opts):
575 575 opts = _byteskwargs(opts)
576 576 #m = match.always(repo.root, repo.getcwd())
577 577 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
578 578 # False))))
579 579 timer, fm = gettimer(ui, opts)
580 580 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
581 581 fm.end()
582 582
583 583 @command(b'perfaddremove', formatteropts)
584 584 def perfaddremove(ui, repo, **opts):
585 585 opts = _byteskwargs(opts)
586 586 timer, fm = gettimer(ui, opts)
587 587 try:
588 588 oldquiet = repo.ui.quiet
589 589 repo.ui.quiet = True
590 590 matcher = scmutil.match(repo[None])
591 591 opts[b'dry_run'] = True
592 592 if b'uipathfn' in getargspec(scmutil.addremove).args:
593 593 uipathfn = scmutil.getuipathfn(repo)
594 594 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
595 595 else:
596 596 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
597 597 finally:
598 598 repo.ui.quiet = oldquiet
599 599 fm.end()
600 600
601 601 def clearcaches(cl):
602 602 # behave somewhat consistently across internal API changes
603 603 if util.safehasattr(cl, b'clearcaches'):
604 604 cl.clearcaches()
605 605 elif util.safehasattr(cl, b'_nodecache'):
606 606 from mercurial.node import nullid, nullrev
607 607 cl._nodecache = {nullid: nullrev}
608 608 cl._nodepos = None
609 609
610 610 @command(b'perfheads', formatteropts)
611 611 def perfheads(ui, repo, **opts):
612 612 """benchmark the computation of a changelog heads"""
613 613 opts = _byteskwargs(opts)
614 614 timer, fm = gettimer(ui, opts)
615 615 cl = repo.changelog
616 616 def s():
617 617 clearcaches(cl)
618 618 def d():
619 619 len(cl.headrevs())
620 620 timer(d, setup=s)
621 621 fm.end()
622 622
623 623 @command(b'perftags', formatteropts+
624 624 [
625 625 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
626 626 ])
627 627 def perftags(ui, repo, **opts):
628 628 opts = _byteskwargs(opts)
629 629 timer, fm = gettimer(ui, opts)
630 630 repocleartagscache = repocleartagscachefunc(repo)
631 631 clearrevlogs = opts[b'clear_revlogs']
632 632 def s():
633 633 if clearrevlogs:
634 634 clearchangelog(repo)
635 635 clearfilecache(repo.unfiltered(), 'manifest')
636 636 repocleartagscache()
637 637 def t():
638 638 return len(repo.tags())
639 639 timer(t, setup=s)
640 640 fm.end()
641 641
642 642 @command(b'perfancestors', formatteropts)
643 643 def perfancestors(ui, repo, **opts):
644 644 opts = _byteskwargs(opts)
645 645 timer, fm = gettimer(ui, opts)
646 646 heads = repo.changelog.headrevs()
647 647 def d():
648 648 for a in repo.changelog.ancestors(heads):
649 649 pass
650 650 timer(d)
651 651 fm.end()
652 652
653 653 @command(b'perfancestorset', formatteropts)
654 654 def perfancestorset(ui, repo, revset, **opts):
655 655 opts = _byteskwargs(opts)
656 656 timer, fm = gettimer(ui, opts)
657 657 revs = repo.revs(revset)
658 658 heads = repo.changelog.headrevs()
659 659 def d():
660 660 s = repo.changelog.ancestors(heads)
661 661 for rev in revs:
662 662 rev in s
663 663 timer(d)
664 664 fm.end()
665 665
666 666 @command(b'perfdiscovery', formatteropts, b'PATH')
667 667 def perfdiscovery(ui, repo, path, **opts):
668 668 """benchmark discovery between local repo and the peer at given path
669 669 """
670 670 repos = [repo, None]
671 671 timer, fm = gettimer(ui, opts)
672 672 path = ui.expandpath(path)
673 673
674 674 def s():
675 675 repos[1] = hg.peer(ui, opts, path)
676 676 def d():
677 677 setdiscovery.findcommonheads(ui, *repos)
678 678 timer(d, setup=s)
679 679 fm.end()
680 680
681 681 @command(b'perfbookmarks', formatteropts +
682 682 [
683 683 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
684 684 ])
685 685 def perfbookmarks(ui, repo, **opts):
686 686 """benchmark parsing bookmarks from disk to memory"""
687 687 opts = _byteskwargs(opts)
688 688 timer, fm = gettimer(ui, opts)
689 689
690 690 clearrevlogs = opts[b'clear_revlogs']
691 691 def s():
692 692 if clearrevlogs:
693 693 clearchangelog(repo)
694 694 clearfilecache(repo, b'_bookmarks')
695 695 def d():
696 696 repo._bookmarks
697 697 timer(d, setup=s)
698 698 fm.end()
699 699
700 700 @command(b'perfbundleread', formatteropts, b'BUNDLE')
701 701 def perfbundleread(ui, repo, bundlepath, **opts):
702 702 """Benchmark reading of bundle files.
703 703
704 704 This command is meant to isolate the I/O part of bundle reading as
705 705 much as possible.
706 706 """
707 707 from mercurial import (
708 708 bundle2,
709 709 exchange,
710 710 streamclone,
711 711 )
712 712
713 713 opts = _byteskwargs(opts)
714 714
715 715 def makebench(fn):
716 716 def run():
717 717 with open(bundlepath, b'rb') as fh:
718 718 bundle = exchange.readbundle(ui, fh, bundlepath)
719 719 fn(bundle)
720 720
721 721 return run
722 722
723 723 def makereadnbytes(size):
724 724 def run():
725 725 with open(bundlepath, b'rb') as fh:
726 726 bundle = exchange.readbundle(ui, fh, bundlepath)
727 727 while bundle.read(size):
728 728 pass
729 729
730 730 return run
731 731
732 732 def makestdioread(size):
733 733 def run():
734 734 with open(bundlepath, b'rb') as fh:
735 735 while fh.read(size):
736 736 pass
737 737
738 738 return run
739 739
740 740 # bundle1
741 741
742 742 def deltaiter(bundle):
743 743 for delta in bundle.deltaiter():
744 744 pass
745 745
746 746 def iterchunks(bundle):
747 747 for chunk in bundle.getchunks():
748 748 pass
749 749
750 750 # bundle2
751 751
752 752 def forwardchunks(bundle):
753 753 for chunk in bundle._forwardchunks():
754 754 pass
755 755
756 756 def iterparts(bundle):
757 757 for part in bundle.iterparts():
758 758 pass
759 759
760 760 def iterpartsseekable(bundle):
761 761 for part in bundle.iterparts(seekable=True):
762 762 pass
763 763
764 764 def seek(bundle):
765 765 for part in bundle.iterparts(seekable=True):
766 766 part.seek(0, os.SEEK_END)
767 767
768 768 def makepartreadnbytes(size):
769 769 def run():
770 770 with open(bundlepath, b'rb') as fh:
771 771 bundle = exchange.readbundle(ui, fh, bundlepath)
772 772 for part in bundle.iterparts():
773 773 while part.read(size):
774 774 pass
775 775
776 776 return run
777 777
778 778 benches = [
779 779 (makestdioread(8192), b'read(8k)'),
780 780 (makestdioread(16384), b'read(16k)'),
781 781 (makestdioread(32768), b'read(32k)'),
782 782 (makestdioread(131072), b'read(128k)'),
783 783 ]
784 784
785 785 with open(bundlepath, b'rb') as fh:
786 786 bundle = exchange.readbundle(ui, fh, bundlepath)
787 787
788 788 if isinstance(bundle, changegroup.cg1unpacker):
789 789 benches.extend([
790 790 (makebench(deltaiter), b'cg1 deltaiter()'),
791 791 (makebench(iterchunks), b'cg1 getchunks()'),
792 792 (makereadnbytes(8192), b'cg1 read(8k)'),
793 793 (makereadnbytes(16384), b'cg1 read(16k)'),
794 794 (makereadnbytes(32768), b'cg1 read(32k)'),
795 795 (makereadnbytes(131072), b'cg1 read(128k)'),
796 796 ])
797 797 elif isinstance(bundle, bundle2.unbundle20):
798 798 benches.extend([
799 799 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
800 800 (makebench(iterparts), b'bundle2 iterparts()'),
801 801 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
802 802 (makebench(seek), b'bundle2 part seek()'),
803 803 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
804 804 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
805 805 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
806 806 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
807 807 ])
808 808 elif isinstance(bundle, streamclone.streamcloneapplier):
809 809 raise error.Abort(b'stream clone bundles not supported')
810 810 else:
811 811 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
812 812
813 813 for fn, title in benches:
814 814 timer, fm = gettimer(ui, opts)
815 815 timer(fn, title=title)
816 816 fm.end()
817 817
818 818 @command(b'perfchangegroupchangelog', formatteropts +
819 819 [(b'', b'cgversion', b'02', b'changegroup version'),
820 820 (b'r', b'rev', b'', b'revisions to add to changegroup')])
821 821 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
822 822 """Benchmark producing a changelog group for a changegroup.
823 823
824 824 This measures the time spent processing the changelog during a
825 825 bundle operation. This occurs during `hg bundle` and on a server
826 826 processing a `getbundle` wire protocol request (handles clones
827 827 and pull requests).
828 828
829 829 By default, all revisions are added to the changegroup.
830 830 """
831 831 opts = _byteskwargs(opts)
832 832 cl = repo.changelog
833 833 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
834 834 bundler = changegroup.getbundler(cgversion, repo)
835 835
836 836 def d():
837 837 state, chunks = bundler._generatechangelog(cl, nodes)
838 838 for chunk in chunks:
839 839 pass
840 840
841 841 timer, fm = gettimer(ui, opts)
842 842
843 843 # Terminal printing can interfere with timing. So disable it.
844 844 with ui.configoverride({(b'progress', b'disable'): True}):
845 845 timer(d)
846 846
847 847 fm.end()
848 848
849 849 @command(b'perfdirs', formatteropts)
850 850 def perfdirs(ui, repo, **opts):
851 851 opts = _byteskwargs(opts)
852 852 timer, fm = gettimer(ui, opts)
853 853 dirstate = repo.dirstate
854 854 b'a' in dirstate
855 855 def d():
856 856 dirstate.hasdir(b'a')
857 857 del dirstate._map._dirs
858 858 timer(d)
859 859 fm.end()
860 860
861 861 @command(b'perfdirstate', formatteropts)
862 862 def perfdirstate(ui, repo, **opts):
863 863 opts = _byteskwargs(opts)
864 864 timer, fm = gettimer(ui, opts)
865 865 b"a" in repo.dirstate
866 866 def d():
867 867 repo.dirstate.invalidate()
868 868 b"a" in repo.dirstate
869 869 timer(d)
870 870 fm.end()
871 871
872 872 @command(b'perfdirstatedirs', formatteropts)
873 873 def perfdirstatedirs(ui, repo, **opts):
874 874 opts = _byteskwargs(opts)
875 875 timer, fm = gettimer(ui, opts)
876 876 b"a" in repo.dirstate
877 877 def d():
878 878 repo.dirstate.hasdir(b"a")
879 879 del repo.dirstate._map._dirs
880 880 timer(d)
881 881 fm.end()
882 882
883 883 @command(b'perfdirstatefoldmap', formatteropts)
884 884 def perfdirstatefoldmap(ui, repo, **opts):
885 885 opts = _byteskwargs(opts)
886 886 timer, fm = gettimer(ui, opts)
887 887 dirstate = repo.dirstate
888 888 b'a' in dirstate
889 889 def d():
890 890 dirstate._map.filefoldmap.get(b'a')
891 891 del dirstate._map.filefoldmap
892 892 timer(d)
893 893 fm.end()
894 894
895 895 @command(b'perfdirfoldmap', formatteropts)
896 896 def perfdirfoldmap(ui, repo, **opts):
897 897 opts = _byteskwargs(opts)
898 898 timer, fm = gettimer(ui, opts)
899 899 dirstate = repo.dirstate
900 900 b'a' in dirstate
901 901 def d():
902 902 dirstate._map.dirfoldmap.get(b'a')
903 903 del dirstate._map.dirfoldmap
904 904 del dirstate._map._dirs
905 905 timer(d)
906 906 fm.end()
907 907
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark serializing the dirstate out to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # make sure the dirstate is loaded before timing starts
    b"a" in ds
    def d():
        # force the dirty flag so write() actually serializes instead of
        # short-circuiting on a clean dirstate
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()
919 919
@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    """benchmark computing the merge actions between the working copy and REV"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()
938 938
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def d():
        # full rename/copy detection between the two contexts
        copies.pathcopies(ctx1, ctx2)
    timer(d)
    fm.end()
950 950
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
          ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def d():
        phases = _phases
        if full:
            # --full also measures re-reading the phase roots from disk
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()
969 969
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    # dict.items() works on both Python 2 and Python 3, unlike iteritems()
    # which no longer exists on Python 3
    for nhex, phase in remotephases.items():
        if nhex == b'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
1025 1025
@command(b'perfmanifest', [
            (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
            (b'', b'clear-disk', False, b'clear on-disk caches too'),
        ] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # attribute names must be native strings: on Python 3,
                # getattr() raises TypeError when handed a bytes name
                if util.safehasattr(repo.manifestlog, 'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
1061 1061
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changeset entry from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()
    def d():
        repo.changelog.read(n)
        #repo.changelog._cache = None
    timer(d)
    fm.end()
1072 1072
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # drop the cached matcher so the timed run re-parses the ignore files
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        # accessing the property triggers the ignore parsing being measured
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1089 1089
@command(b'perfindex', [
            (b'', b'rev', [], b'revision to be looked up (default tip)'),
            (b'', b'no-lookup', None, b'do not revision lookup post creation'),
         ] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # _byteskwargs() turned every option key into bytes, so the key must
        # be bytes here too (a native str key raises KeyError on Python 3);
        # the Abort message must likewise be bytes for Python 3 Mercurial
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)
    timer(d, setup=setup)
    fm.end()
1143 1143
@command(b'perfnodemap', [
            (b'', b'rev', [], b'revision to be looked up (default tip)'),
            (b'', b'clear-caches', True, b'clear revlog cache between calls'),
         ] + formatteropts)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # _byteskwargs() converted all option keys to bytes; a native str key
    # would raise KeyError on Python 3
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        # error messages must be bytes for Python 3 Mercurial
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]
    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        def setup():
            setnodeget()
    else:
        setnodeget()
        d() # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1202 1202
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark launching `hg version` in a subprocess"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        if os.name != r'nt':
            # a blank HGRCPATH keeps user config parsing out of the timing
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
        else:
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
    timer(d)
    fm.end()
1216 1216
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    changelog = repo.changelog
    # resolve all node ids up front so only parents() is being measured
    nodelist = [changelog.node(i) for i in _xrange(count)]
    def d():
        for node in nodelist:
            repo.changelog.parents(node)
    timer(d)
    fm.end()
1240 1240
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of a single changectx"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    def d():
        len(repo[rev].files())
    timer(d)
    fm.end()
1250 1250
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw file list stored in a changelog entry"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    changelog = repo.changelog
    def d():
        # field 3 of the parsed changelog entry is the list of touched files
        len(changelog.read(rev)[3])
    timer(d)
    fm.end()
1261 1261
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision identifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        return len(repo.lookup(rev))
    timer(d)
    fm.end()
1268 1268
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
         ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark replaying a stream of random edits into a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    numedits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed keeps the generated edit stream identical across runs
    random.seed(0)
    randint = random.randint
    currentlines = 0
    edits = []
    for rev in _xrange(numedits):
        # pick a random source range [a1, a2) inside the current file ...
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        # ... and a random replacement range [b1, b2)
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        edits.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for edit in edits:
            ll.replacelines(*edit)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1302 1302
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # hoist the attribute lookup so it is not part of the measurement
    revrange = scmutil.revrange
    def d():
        return len(revrange(repo, specs))
    timer(d)
    fm.end()
1310 1310
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark a node lookup in a directly-constructed changelog revlog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(n)
        # drop the revlog caches so every run starts cold
        clearcaches(cl)
    timer(d)
    fm.end()
1324 1324
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running `hg log`, optionally following renames"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # buffer the command output so printing is not part of the timing
    ui.pushbuffer()
    def d():
        commands.log(ui, repo, rev=rev, date=b'', user=b'',
                     copies=opts.get(b'rename'))
    timer(d)
    ui.popbuffer()
    fm.end()
1338 1338
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        # walk from tip down to (but excluding) the null revision
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[rev]
            ctx.branch() # read changelog data (in addition to the index)
    timer(moonwalk)
    fm.end()
1353 1353
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render into a throwaway ui so terminal output is not part of the timing
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1387 1387
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
         ])
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # every merge revision in the selected set is a candidate pair source
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                # NOTE(review): `data` is built with bytes keys while the
                # assignments below add native-str keys used by the
                # %-formatting of `output`; this mixing looks Python 2
                # specific — confirm behaviour under Python 3
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
1463 1463
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        return scmutil.casecollisionauditor(ui, False, repo.dirstate)
    timer(d)
    fm.end()
1470 1470
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    def d():
        s.fncache._load()
    timer(d)
    fm.end()
1480 1480
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark rewriting the fncache file inside a transaction"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')
    def d():
        # force the dirty flag so write() actually serializes each run
        s.fncache._dirty = True
        s.fncache.write(tr)
    timer(d)
    tr.close()
    lock.release()
    fm.end()
1497 1497
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark store-path encoding of every fncache entry"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    s.fncache._load()
    def d():
        for p in s.fncache.entries:
            s.encode(p)
    timer(d)
    fm.end()
1509 1509
def _bdiffworker(q, blocks, xdiff, ready, done):
    # Worker loop for the threaded mode of perfbdiff: pull (old, new) text
    # pairs from `q` and diff them until a None sentinel is seen, then park
    # on the `ready` condition so all workers restart in sync for the next
    # timed run. `done` tells the workers to exit for good.
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()
1525 1525
def _manifestrevision(repo, mnode):
    """Return the raw stored revision text for manifest node ``mnode``.

    Handles both modern manifestlog implementations (``getstorage``) and
    older ones that exposed ``_revlog`` directly.
    """
    ml = repo.manifestlog

    # attribute names must be native strings: on Python 3, getattr() raises
    # TypeError when handed a bytes name
    if util.safehasattr(ml, 'getstorage'):
        store = ml.getstorage(b'')
    else:
        store = ml._revlog

    return store.revision(mnode)
1535 1535
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    # (old text, new text) tuples the timed function will diff
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        # threaded mode: start workers up front (see _bdiffworker) so thread
        # creation cost stays outside the timed function
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # tell the workers to exit and wake any parked on the condition
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1636 1636
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    # (old text, new text) tuples the timed function will diff
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1702 1702
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # map single-letter diff flags to the matching keyword argument name
    flagnames = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    for flags in ('', 'w', 'b', 'B', 'wB'):
        diffkwargs = dict((flagnames[flag], b'1') for flag in flags)
        def d():
            # buffer output so printing is not part of the measurement
            ui.pushbuffer()
            commands.diff(ui, repo, **diffkwargs)
            ui.popbuffer()
        flagbytes = flags.encode('ascii')
        title = b'diffopts: %s' % ((b'-' + flagbytes) if flagbytes else b'none')
        timer(d, title=title)
    fm.end()
1724 1724
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener') # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # the first 4 bytes hold the flags (high 16 bits) and format version
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    # sample nodes spread across the index for the lookup benchmarks
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    # (callable, title) pairs; each is timed independently below
    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1842 1842
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts backward from the end
    if startrev < 0:
        startrev += rllen

    def readrevs():
        rl.clearcaches()

        step = opts[b'dist']
        if reverse:
            first, last, step = rllen - 1, startrev - 1, -step
        else:
            first, last = startrev, rllen

        for x in _xrange(first, last, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(x))

    timer, fm = gettimer(ui, opts)
    timer(readrevs)
    fm.end()
1884 1884
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
         ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    # _byteskwargs converted all keys to bytes; native str keys would
    # KeyError on Python 3, so use b'' keys like the rest of this file.
    lazydeltabase = opts[b'lazydeltabase']
    source = opts[b'source']
    clearcaches = opts[b'clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort(b'invalid source type: %s' % source)

    ### actually gather results
    count = opts[b'count']
    if count <= 0:
        raise error.Abort(b'invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    # allresults is a list of per-run lists of (rev, timing); transpose it
    # into one list of (rev, [timing-per-run]).
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts[b'details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
1992 1992
1993 1993 class _faketr(object):
1994 1994 def add(s, x, y, z=None):
1995 1995 return None
1996 1996
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    """Rewrite revisions [startrev, stoprev] of revlog `orig` into a
    temporary copy, timing each individual addrawrevision() call.

    Returns a list of (rev, timing) pairs, where `timing` is the
    measurement object produced by `timeone` for that single write.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            def updateprogress(pos):
                progress.update(pos)
            def completeprogress():
                progress.complete()
        else:
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            # build the addrawrevision() arguments outside the timed
            # section so only the write itself is measured
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
2033 2033
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) pair used to re-add revision `rev` of
    revlog `orig` through addrawrevision(), following the `source`
    strategy (full text or one of the delta flavors)."""
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    text = None
    cachedelta = None

    if source == b'full':
        # feed the resolved full text; no precomputed delta
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second one
        parent = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(parent), orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        parent, diff = p1, orig.revdiff(p1, rev)
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            # keep whichever parent yields the strictly smaller delta
            if len(p2diff) < len(diff):
                parent, diff = p2, p2diff
        cachedelta = (orig.rev(parent), diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2072 2072
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a writable copy of revlog `orig` truncated just before
    `truncaterev`, living in a temporary directory removed on exit."""
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # each index entry is fixed-size, so this drops entries
            # for revisions >= truncaterev
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
2119 2119
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # no explicit selection: probe every available engine that can
        # actually compress for revlogs
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # open a raw file handle on the revlog's storage file
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    # mutable cell: dochunkbatch stores the decompressed chunks here so
    # the docompress benchmarks (run later) can reuse them
    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2237 2237
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m, the positional argument is actually the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # hoist attribute lookups out of the hot loop
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved between modules over time; fall back for older
    # Mercurial versions
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute the inputs shared by the individual phase benchmarks
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2373 2373
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building
    the volatile revision set caches on revset execution. Volatile caches
    hold filtered and obsolescence related data."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def runrevset():
        if clear:
            # drop filtered/obsolete related caches before each evaluation
            repo.invalidatevolatilesets()
        # --contexts materializes a changectx per revision instead of
        # plain revision numbers
        evaluate = repo.set if contexts else repo.revs
        for unused in evaluate(expr):
            pass

    timer(runrevset)
    fm.end()
2396 2396
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
         ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def makebench(compute, name):
        """build a benchmark recomputing `name` through `compute(repo, name)`
        from a cold volatile-set cache"""
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            compute(repo, name)
        return d

    # obsolescence related sets first, then repoview filters, both in
    # sorted order; positional arguments restrict which ones run
    for name in sorted(obsolete.cachefuncs):
        if not names or name in names:
            timer(makebench(obsolete.getrevs, name), title=name)

    for name in sorted(repoview.filtertable):
        if not names or name in names:
            timer(makebench(repoview.filterrevs, name), title=name)
    fm.end()
2438 2438
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
         ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                view._branchcaches.clear()
            else:
                filtered.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    # (order filters so each one's subset was processed before it)
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap reading and writing so only in-memory
    # computation is measured; restored in the finally block below
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2517 2517
@command(b'perfbranchmapupdate', [
    (b'', b'base', [], b'subset of revision to start from'),
    (b'', b'target', [], b'subset of revision to end with'),
    (b'', b'clear-caches', False, b'clear cache between each runs')
] + formatteropts)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the revisions that the benchmarked update will have to process
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    # register two synthetic repoview filters matching the base/target
    # subsets; removed again in the finally clause
    try:
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset where found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2621 2621
@command(b'perfbranchmapload', [
    (b'f', b'filter', b'', b'Specify repoview filter'),
    (b'', b'list', False, b'List branchmap filter caches'),
    (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),

] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap

    With --list, only print the branchmap cache files present on disk
    (with their sizes) instead of benchmarking.
    """
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                # cache files are named branch2[-<filtername>]
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap() # make sure we have a relevant, up to date branchmap

    # the on-disk reader moved from module-level to a classmethod over time
    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        fromfile(repo)
    timer(bench, setup=setup)
    fm.end()
2672 2672
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)
    # parsing happens in the obsstore constructor; len() reports the count
    def loadmarkers():
        return len(obsolete.obsstore(svfs))
    timer(loadmarkers)
    fm.end()
2682 2682
@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
     (b'', b'mincost', 0, b'smallest cost of items in cache'),
     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
     (b'', b'size', 4, b'size of cache'),
     (b'', b'gets', 10000, b'number of key lookups'),
     (b'', b'sets', 10000, b'number of key sets'),
     (b'', b'mixed', 10000, b'number of mixed mode operations'),
     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
# Benchmark util.lrucachedict under several access patterns: construction,
# pure gets (no eviction), pure inserts/sets (with eviction), and a
# randomized get/set mix.  When --costlimit is non-zero, the cost-aware
# insert() API is exercised instead of the plain one.
# NOTE(review): deliberately no docstring -- help output is expected to show
# "(no help text available)" for this command; do not add one.
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    opts = _byteskwargs(opts)

    def doinit():
        # measure the cost of constructing the cache object itself
        for i in _xrange(10000):
            util.lrucachedict(size)

    # candidate per-item costs for the cost-aware benchmarks
    costrange = list(range(mincost, maxcost + 1))

    # keys used to pre-fill the cache for the get benchmarks
    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        # NOTE: `costs` is assigned further down in this function; the
        # closure only resolves it when the benchmark actually runs, after
        # all sequences have been built.  Keep the statement order intact.
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                # entries may have been evicted to respect the cost limit
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        # same workload as doinserts() but through __setitem__
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0  # get
        else:
            op = 1  # set

        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # cost-aware and plain benchmarks are mutually exclusive: the cost API
    # only makes sense when a limit is configured
    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    # one timer/formatter per benchmark so each result is reported
    # separately under its own title
    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2813 2813
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    # emit the same line many times so only ui.write() overhead is measured
    def write():
        line = b'Testing write performance\n'
        for _unused in range(100000):
            ui.write(line)
    timer(write)
    fm.end()
2826 2826
def uisetup(ui):
    # for "historical portability":
    # Mercurial 1.9 (a79fea6b3e77) through 3.7 (5606f7d0d063) has
    # cmdutil.openrevlog() but no commands.debugrevlogopts.  On those
    # versions openrevlog() does not understand the '--dir' option
    # (available since 3.5 / 49c583ca48c4), so wrap it to fail loudly
    # instead of misbehaving.
    hasopenrevlog = util.safehasattr(cmdutil, b'openrevlog')
    hasdebugrevlogopts = util.safehasattr(commands, b'debugrevlogopts')
    if not hasopenrevlog or hasdebugrevlogopts:
        return

    def openrevlog(orig, repo, cmd, file_, opts):
        if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
            raise error.Abort(b"This version doesn't support --dir option",
                              hint=b"use 3.5 or later")
        return orig(repo, cmd, file_, opts)
    extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2841 2841
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    # drive a progress bar from 0 to `total`, one increment per step; the
    # context manager completes/clears the bar on exit
    def doprogress():
        with ui.makeprogress(topic, total=total) as progress:
            for _step in pycompat.xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
@@ -1,355 +1,356
1 1 #require test-repo
2 2
3 3 Set vars:
4 4
5 5 $ . "$TESTDIR/helpers-testrepo.sh"
6 6 $ CONTRIBDIR="$TESTDIR/../contrib"
7 7
8 8 Prepare repo:
9 9
10 10 $ hg init
11 11
12 12 $ echo this is file a > a
13 13 $ hg add a
14 14 $ hg commit -m first
15 15
16 16 $ echo adding to file a >> a
17 17 $ hg commit -m second
18 18
19 19 $ echo adding more to file a >> a
20 20 $ hg commit -m third
21 21
22 22 $ hg up -r 0
23 23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 24 $ echo merge-this >> a
25 25 $ hg commit -m merge-able
26 26 created new head
27 27
28 28 $ hg up -r 2
29 29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30 30
31 31 perfstatus
32 32
33 33 $ cat >> $HGRCPATH << EOF
34 34 > [extensions]
35 35 > perf=$CONTRIBDIR/perf.py
36 36 > [perf]
37 37 > presleep=0
38 38 > stub=on
39 39 > parentscount=1
40 40 > EOF
41 41 $ hg help -e perf
42 42 perf extension - helper extension to measure performance
43 43
44 44 Configurations
45 45 ==============
46 46
47 47 "perf"
48 48 ------
49 49
50 50 "all-timing"
51 51 When set, additional statistics will be reported for each benchmark: best,
52 52 worst, median average. If not set only the best timing is reported
53 53 (default: off).
54 54
55 55 "presleep"
56 56 number of second to wait before any group of runs (default: 1)
57 57
58 58 "run-limits"
59 59 Control the number of runs each benchmark will perform. The option value
60 60 should be a list of '<time>-<numberofrun>' pairs. After each run the
61 61 conditions are considered in order with the following logic:
62 62
63 63 If benchmark has been running for <time> seconds, and we have performed
64 64 <numberofrun> iterations, stop the benchmark,
65 65
66 66 The default value is: '3.0-100, 10.0-3'
67 67
68 68 "stub"
69 69 When set, benchmarks will only be run once, useful for testing (default:
70 70 off)
71 71
72 72 list of commands:
73 73
74 74 perfaddremove
75 75 (no help text available)
76 76 perfancestors
77 77 (no help text available)
78 78 perfancestorset
79 79 (no help text available)
80 80 perfannotate (no help text available)
81 81 perfbdiff benchmark a bdiff between revisions
82 82 perfbookmarks
83 83 benchmark parsing bookmarks from disk to memory
84 84 perfbranchmap
85 85 benchmark the update of a branchmap
86 86 perfbranchmapload
87 87 benchmark reading the branchmap
88 88 perfbranchmapupdate
89 89 benchmark branchmap update from for <base> revs to <target>
90 90 revs
91 91 perfbundleread
92 92 Benchmark reading of bundle files.
93 93 perfcca (no help text available)
94 94 perfchangegroupchangelog
95 95 Benchmark producing a changelog group for a changegroup.
96 96 perfchangeset
97 97 (no help text available)
98 98 perfctxfiles (no help text available)
99 99 perfdiffwd Profile diff of working directory changes
100 100 perfdirfoldmap
101 101 (no help text available)
102 102 perfdirs (no help text available)
103 103 perfdirstate (no help text available)
104 104 perfdirstatedirs
105 105 (no help text available)
106 106 perfdirstatefoldmap
107 107 (no help text available)
108 108 perfdirstatewrite
109 109 (no help text available)
110 110 perfdiscovery
111 111 benchmark discovery between local repo and the peer at given
112 112 path
113 113 perffncacheencode
114 114 (no help text available)
115 115 perffncacheload
116 116 (no help text available)
117 117 perffncachewrite
118 118 (no help text available)
119 119 perfheads benchmark the computation of a changelog heads
120 120 perfhelper-pathcopies
121 121 find statistic about potential parameters for the
122 122 'perftracecopies'
123 123 perfignore benchmark operation related to computing ignore
124 124 perfindex benchmark index creation time followed by a lookup
125 125 perflinelogedits
126 126 (no help text available)
127 127 perfloadmarkers
128 128 benchmark the time to parse the on-disk markers for a repo
129 129 perflog (no help text available)
130 130 perflookup (no help text available)
131 131 perflrucachedict
132 132 (no help text available)
133 133 perfmanifest benchmark the time to read a manifest from disk and return a
134 134 usable
135 135 perfmergecalculate
136 136 (no help text available)
137 137 perfmoonwalk benchmark walking the changelog backwards
138 138 perfnodelookup
139 139 (no help text available)
140 140 perfnodemap benchmark the time necessary to look up revision from a cold
141 141 nodemap
142 142 perfparents benchmark the time necessary to fetch one changeset's parents.
143 143 perfpathcopies
144 144 benchmark the copy tracing logic
145 145 perfphases benchmark phasesets computation
146 146 perfphasesremote
147 147 benchmark time needed to analyse phases of the remote server
148 148 perfprogress printing of progress bars
149 149 perfrawfiles (no help text available)
150 150 perfrevlogchunks
151 151 Benchmark operations on revlog chunks.
152 152 perfrevlogindex
153 153 Benchmark operations against a revlog index.
154 154 perfrevlogrevision
155 155 Benchmark obtaining a revlog revision.
156 156 perfrevlogrevisions
157 157 Benchmark reading a series of revisions from a revlog.
158 158 perfrevlogwrite
159 159 Benchmark writing a series of revisions to a revlog.
160 160 perfrevrange (no help text available)
161 161 perfrevset benchmark the execution time of a revset
162 162 perfstartup (no help text available)
163 163 perfstatus (no help text available)
164 164 perftags (no help text available)
165 165 perftemplating
166 166 test the rendering time of a given template
167 167 perfunidiff benchmark a unified diff between revisions
168 168 perfvolatilesets
169 169 benchmark the computation of various volatile set
170 170 perfwalk (no help text available)
171 171 perfwrite microbenchmark ui.write
172 172
173 173 (use 'hg help -v perf' to show built-in aliases and global options)
174 174 $ hg perfaddremove
175 175 $ hg perfancestors
176 176 $ hg perfancestorset 2
177 177 $ hg perfannotate a
178 178 $ hg perfbdiff -c 1
179 179 $ hg perfbdiff --alldata 1
180 180 $ hg perfunidiff -c 1
181 181 $ hg perfunidiff --alldata 1
182 182 $ hg perfbookmarks
183 183 $ hg perfbranchmap
184 184 $ hg perfbranchmapload
185 185 $ hg perfbranchmapupdate --base "not tip" --target "tip"
186 186 benchmark of branchmap with 3 revisions with 1 new ones
187 187 $ hg perfcca
188 188 $ hg perfchangegroupchangelog
189 189 $ hg perfchangegroupchangelog --cgversion 01
190 190 $ hg perfchangeset 2
191 191 $ hg perfctxfiles 2
192 192 $ hg perfdiffwd
193 193 $ hg perfdirfoldmap
194 194 $ hg perfdirs
195 195 $ hg perfdirstate
196 196 $ hg perfdirstatedirs
197 197 $ hg perfdirstatefoldmap
198 198 $ hg perfdirstatewrite
199 199 #if repofncache
200 200 $ hg perffncacheencode
201 201 $ hg perffncacheload
202 202 $ hg debugrebuildfncache
203 203 fncache already up to date
204 204 $ hg perffncachewrite
205 205 $ hg debugrebuildfncache
206 206 fncache already up to date
207 207 #endif
208 208 $ hg perfheads
209 209 $ hg perfignore
210 210 $ hg perfindex
211 211 $ hg perflinelogedits -n 1
212 212 $ hg perfloadmarkers
213 213 $ hg perflog
214 214 $ hg perflookup 2
215 215 $ hg perflrucache
216 216 $ hg perfmanifest 2
217 217 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
218 218 $ hg perfmanifest -m 44fe2c8352bb
219 219 abort: manifest revision must be integer or full node
220 220 [255]
221 221 $ hg perfmergecalculate -r 3
222 222 $ hg perfmoonwalk
223 223 $ hg perfnodelookup 2
224 224 $ hg perfpathcopies 1 2
225 225 $ hg perfprogress --total 1000
226 226 $ hg perfrawfiles 2
227 227 $ hg perfrevlogindex -c
228 228 #if reporevlogstore
229 229 $ hg perfrevlogrevisions .hg/store/data/a.i
230 230 #endif
231 231 $ hg perfrevlogrevision -m 0
232 232 $ hg perfrevlogchunks -c
233 233 $ hg perfrevrange
234 234 $ hg perfrevset 'all()'
235 235 $ hg perfstartup
236 236 $ hg perfstatus
237 237 $ hg perftags
238 238 $ hg perftemplating
239 239 $ hg perfvolatilesets
240 240 $ hg perfwalk
241 241 $ hg perfparents
242 242 $ hg perfdiscovery -q .
243 243
244 244 Test run control
245 245 ----------------
246 246
247 247 Simple single entry
248 248
249 249 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
250 250 ! wall * comb * user * sys * (best of 15) (glob)
251 251
252 252 Multiple entries
253 253
254 254 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
255 255 ! wall * comb * user * sys * (best of 5) (glob)
256 256
257 257 error cases are ignored
258 258
259 259 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
260 260 malformatted run limit entry, missing "-": 500
261 261 ! wall * comb * user * sys * (best of 5) (glob)
262 262 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
263 malformatted run limit entry, could not convert string to float: aaa: aaa-12
263 malformatted run limit entry, could not convert string to float: aaa: aaa-12 (no-py3 !)
264 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
264 265 ! wall * comb * user * sys * (best of 5) (glob)
265 266 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
266 267 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
267 268 ! wall * comb * user * sys * (best of 5) (glob)
268 269
269 270 test actual output
270 271 ------------------
271 272
272 273 normal output:
273 274
274 275 $ hg perfheads --config perf.stub=no
275 276 ! wall * comb * user * sys * (best of *) (glob)
276 277
277 278 detailed output:
278 279
279 280 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
280 281 ! wall * comb * user * sys * (best of *) (glob)
281 282 ! wall * comb * user * sys * (max of *) (glob)
282 283 ! wall * comb * user * sys * (avg of *) (glob)
283 284 ! wall * comb * user * sys * (median of *) (glob)
284 285
285 286 test json output
286 287 ----------------
287 288
288 289 normal output:
289 290
290 291 $ hg perfheads --template json --config perf.stub=no
291 292 [
292 293 {
293 294 "comb": *, (glob)
294 295 "count": *, (glob)
295 296 "sys": *, (glob)
296 297 "user": *, (glob)
297 298 "wall": * (glob)
298 299 }
299 300 ]
300 301
301 302 detailed output:
302 303
303 304 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
304 305 [
305 306 {
306 307 "avg.comb": *, (glob)
307 308 "avg.count": *, (glob)
308 309 "avg.sys": *, (glob)
309 310 "avg.user": *, (glob)
310 311 "avg.wall": *, (glob)
311 312 "comb": *, (glob)
312 313 "count": *, (glob)
313 314 "max.comb": *, (glob)
314 315 "max.count": *, (glob)
315 316 "max.sys": *, (glob)
316 317 "max.user": *, (glob)
317 318 "max.wall": *, (glob)
318 319 "median.comb": *, (glob)
319 320 "median.count": *, (glob)
320 321 "median.sys": *, (glob)
321 322 "median.user": *, (glob)
322 323 "median.wall": *, (glob)
323 324 "sys": *, (glob)
324 325 "user": *, (glob)
325 326 "wall": * (glob)
326 327 }
327 328 ]
328 329
329 330 Check perf.py for historical portability
330 331 ----------------------------------------
331 332
332 333 $ cd "$TESTDIR/.."
333 334
334 335 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
335 336 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
336 337 > "$TESTDIR"/check-perf-code.py contrib/perf.py
337 338 contrib/perf.py:\d+: (re)
338 339 > from mercurial import (
339 340 import newer module separately in try clause for early Mercurial
340 341 contrib/perf.py:\d+: (re)
341 342 > from mercurial import (
342 343 import newer module separately in try clause for early Mercurial
343 344 contrib/perf.py:\d+: (re)
344 345 > origindexpath = orig.opener.join(orig.indexfile)
345 346 use getvfs()/getsvfs() for early Mercurial
346 347 contrib/perf.py:\d+: (re)
347 348 > origdatapath = orig.opener.join(orig.datafile)
348 349 use getvfs()/getsvfs() for early Mercurial
349 350 contrib/perf.py:\d+: (re)
350 351 > vfs = vfsmod.vfs(tmpdir)
351 352 use getvfs()/getsvfs() for early Mercurial
352 353 contrib/perf.py:\d+: (re)
353 354 > vfs.options = getattr(orig.opener, 'options', None)
354 355 use getvfs()/getsvfs() for early Mercurial
355 356 [1]
General Comments 0
You need to be logged in to leave comments. Login now