##// END OF EJS Templates
perf: add a perfnodemap command...
Boris Feld -
r41507:22919858 default draft
parent child Browse files
Show More
@@ -1,2701 +1,2754
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance'''
3 3
4 4 # "historical portability" policy of perf.py:
5 5 #
6 6 # We have to do:
7 7 # - make perf.py "loadable" with as wide Mercurial version as possible
8 8 # This doesn't mean that perf commands work correctly with that Mercurial.
9 9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
10 10 # - make historical perf command work correctly with as wide Mercurial
11 11 # version as possible
12 12 #
13 13 # We have to do, if possible with reasonable cost:
14 14 # - make recent perf command for historical feature work correctly
15 15 # with early Mercurial
16 16 #
17 17 # We don't have to do:
18 18 # - make perf command for recent feature work correctly with early
19 19 # Mercurial
20 20
21 21 from __future__ import absolute_import
22 22 import contextlib
23 23 import functools
24 24 import gc
25 25 import os
26 26 import random
27 27 import shutil
28 28 import struct
29 29 import sys
30 30 import tempfile
31 31 import threading
32 32 import time
33 33 from mercurial import (
34 34 changegroup,
35 35 cmdutil,
36 36 commands,
37 37 copies,
38 38 error,
39 39 extensions,
40 40 hg,
41 41 mdiff,
42 42 merge,
43 43 revlog,
44 44 util,
45 45 )
46 46
47 47 # for "historical portability":
48 48 # try to import modules separately (in dict order), and ignore
49 49 # failure, because these aren't available with early Mercurial
50 50 try:
51 51 from mercurial import branchmap # since 2.5 (or bcee63733aad)
52 52 except ImportError:
53 53 pass
54 54 try:
55 55 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
56 56 except ImportError:
57 57 pass
58 58 try:
59 59 from mercurial import registrar # since 3.7 (or 37d50250b696)
60 60 dir(registrar) # forcibly load it
61 61 except ImportError:
62 62 registrar = None
63 63 try:
64 64 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
65 65 except ImportError:
66 66 pass
67 67 try:
68 68 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
69 69 except ImportError:
70 70 pass
71 71 try:
72 72 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
73 73 except ImportError:
74 74 pass
75 75
76 76
def identity(a):
    """No-op converter: hand back the argument unchanged.

    Serves as the fallback for the pycompat helpers below when running
    on a Mercurial too old to provide them.
    """
    return a
79 79
80 80 try:
81 81 from mercurial import pycompat
82 82 getargspec = pycompat.getargspec # added to module after 4.5
83 83 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
84 84 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
85 85 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
86 86 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
87 87 if pycompat.ispy3:
88 88 _maxint = sys.maxsize # per py3 docs for replacing maxint
89 89 else:
90 90 _maxint = sys.maxint
91 91 except (ImportError, AttributeError):
92 92 import inspect
93 93 getargspec = inspect.getargspec
94 94 _byteskwargs = identity
95 95 fsencode = identity # no py3 support
96 96 _maxint = sys.maxint # no py3 support
97 97 _sysstr = lambda x: x # no py3 support
98 98 _xrange = xrange
99 99
100 100 try:
101 101 # 4.7+
102 102 queue = pycompat.queue.Queue
103 103 except (AttributeError, ImportError):
104 104 # <4.7.
105 105 try:
106 106 queue = pycompat.queue
107 107 except (AttributeError, ImportError):
108 108 queue = util.queue
109 109
110 110 try:
111 111 from mercurial import logcmdutil
112 112 makelogtemplater = logcmdutil.maketemplater
113 113 except (AttributeError, ImportError):
114 114 try:
115 115 makelogtemplater = cmdutil.makelogtemplater
116 116 except (AttributeError, ImportError):
117 117 makelogtemplater = None
118 118
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # unique sentinel: distinguishes "attribute missing" from any real value
def safehasattr(thing, attr):
    # getattr with a sentinel default never raises for a missing attribute;
    # attr may be bytes in this file, so convert to a native str first
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)
126 126
127 127 # for "historical portability":
128 128 # define util.timer forcibly, because util.timer has been available
129 129 # since ae5d60bb70c9
130 130 if safehasattr(time, 'perf_counter'):
131 131 util.timer = time.perf_counter
132 132 elif os.name == b'nt':
133 133 util.timer = time.clock
134 134 else:
135 135 util.timer = time.time
136 136
137 137 # for "historical portability":
138 138 # use locally defined empty option list, if formatteropts isn't
139 139 # available, because commands.formatteropts has been available since
140 140 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
141 141 # available since 2.2 (or ae5f92e154d3)
142 142 formatteropts = getattr(cmdutil, "formatteropts",
143 143 getattr(commands, "formatteropts", []))
144 144
145 145 # for "historical portability":
146 146 # use locally defined option list, if debugrevlogopts isn't available,
147 147 # because commands.debugrevlogopts has been available since 3.7 (or
148 148 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
149 149 # since 1.9 (or a79fea6b3e77).
150 150 revlogopts = getattr(cmdutil, "debugrevlogopts",
151 151 getattr(commands, "debugrevlogopts", [
152 152 (b'c', b'changelog', False, (b'open changelog')),
153 153 (b'm', b'manifest', False, (b'open manifest')),
154 154 (b'', b'dir', False, (b'open directory manifest')),
155 155 ]))
156 156
# table collecting every @command-decorated perf command defined below
cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    # a command name spec is b"name|alias1|alias2..."; return all names
    return cmd.split(b"|")
164 164
# Pick the newest available "@command" decorator factory; registrar may be
# None on very old Mercurial (see the guarded import above), in which case
# safehasattr simply returns False and we fall through.
if safehasattr(registrar, 'command'):
    # registrar.command: the modern API (since 3.7)
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            # mirror the (func, options[, synopsis]) tuple shapes the
            # dispatcher expects
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
192 192
# Best-effort registration of the config items this extension reads
# (presumably to keep recent Mercurial from warning about undeclared
# config access — confirm against registrar.configitem docs); versions
# without registrar.configitem just skip this.
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    pass
212 212
def getlen(ui):
    """Return a sequence-length function.

    Under the experimental perf.stub config every length is reported as
    1, keeping output stable for the test suite.
    """
    stubbed = ui.configbool(b"perf", b"stub", False)
    return (lambda seq: 1) if stubbed else len
217 217
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands.

    The returned timer is either _timer (normal operation) or stub_timer
    (when perf.stub is set), pre-bound to the formatter via
    functools.partial."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                # falsy like a plain (non-templated) formatter
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)
    return functools.partial(_timer, fm, displayall=displayall), fm
283 283
def stub_timer(fm, func, setup=None, title=None):
    """Run *setup* (when provided) and then *func* exactly once.

    Drop-in replacement for _timer under the perf.stub config: no timing
    loop, nothing recorded — just execute the payload so test runs stay
    fast and deterministic.
    """
    if setup:
        setup()
    func()
288 288
@contextlib.contextmanager
def timeone():
    """Time the with-block; yield a list that receives one result tuple.

    On exit the yielded list gets a single (wall-clock, user-cpu,
    system-cpu) tuple appended, so callers read `item[0]` afterwards.
    """
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    # os.times() tuples start with (user, system, ...); the deltas are the
    # CPU time consumed inside the with-block
    r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
299 299
def _timer(fm, func, setup=None, title=None, displayall=False):
    """Repeatedly time *func* and report results through formatter *fm*.

    *setup*, when given, runs (untimed) before each invocation.  The last
    return value of *func* is passed to formatone as the displayed result.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    while True:
        if setup is not None:
            setup()
        with timeone() as item:
            r = func()
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # sampling heuristics: stop after 3s once we have >=100 samples
        # (fast payloads), or after 10s once we have >=3 (slow payloads)
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)
320 320
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timing summary to formatter *fm*.

    *timings* is a list of (wall, user, sys) tuples.  The best (fastest
    wall-clock) sample is always reported; with *displayall*, the max,
    average and median samples are reported as well.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)
    def display(role, entry):
        # non-"best" rows get a "role." prefix on their field names
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')
    # tuple sort orders primarily by wall-clock time (first element)
    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
352 352
353 353 # utilities for historical portability
354 354
def getint(ui, section, name, default):
    """Read an integer config value, working on pre-1.9 Mercurial.

    ui.configint has only been available since 1.9 (or fa2b596db182),
    so fetch the raw value and parse it ourselves.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, raw))
366 366
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        # tiny handle that captures obj/name/origvalue in its closure;
        # name may be bytes, hence the _sysstr conversions
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
396 396
397 397 # utilities to examine each internal API changes
398 398
def getbranchmapsubsettable():
    """Return the 'subsettable' mapping used by branchmap caching."""
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    for mod in (branchmap, repoview):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
414 414
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf);
    # older repos expose the same object as repo.sopener
    store_vfs = getattr(repo, 'svfs', None)
    return store_vfs if store_vfs else getattr(repo, 'sopener')
425 425
def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf);
    # older repos expose the same object as repo.opener
    working_vfs = getattr(repo, 'vfs', None)
    return working_vfs if working_vfs else getattr(repo, 'opener')
436 436
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API

    Probes the repo for the cache attribute used by its Mercurial
    generation and returns a zero-argument callable that invalidates it.
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            # NOTE: instance __dict__ keys are native str; the previous
            # bytes key (b'_tagscache') could never match on Python 3,
            # silently leaving the cache warm between timed runs.
            if r'_tagscache' in vars(repo):
                del repo.__dict__[r'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
465 465
466 466 # utilities to clear cache
467 467
def clearfilecache(obj, attrname):
    """Invalidate a filecache'd attribute so its next access recomputes it."""
    # repos may be filtered views; the filecache lives on the unfiltered repo
    getunfiltered = getattr(obj, 'unfiltered', None)
    if getunfiltered is not None:
        obj = getunfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
475 475
def clearchangelog(repo):
    """Force the changelog to be reloaded on its next access."""
    # filtered repo views keep their own cached changelog key/value pair as
    # plain attributes; reset them directly (object.__setattr__ bypasses any
    # attribute proxying on the view)
    if repo is not repo.unfiltered():
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
481 481
482 482 # perf commands
483 483
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the working directory (dirstate.walk)"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    # materialize the walk and take its length so the generator is exhausted
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()
492 492
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file F at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    # annotate(True): presumably enables follow mode — confirm against
    # filectx.annotate's signature for this Mercurial version
    timer(lambda: len(fc.annotate(True)))
    fm.end()
500 500
@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    """benchmark a repo-wide status call"""
    opts = _byteskwargs(opts)
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    # sum of per-category list lengths forces full evaluation of the result
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
512 512
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark an addremove dry-run over the working copy"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # capture the old value *before* entering try: previously the assignment
    # sat inside the try block, so a failure there would have made the
    # finally clause raise NameError on the still-unbound 'oldquiet'
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
526 526
def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        # modern revlogs expose an explicit cache-clearing entry point
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # older revlogs: reset the node->rev lookup cache by hand to its
        # initial {nullid: nullrev} state
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
535 535
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def s():
        # drop lookup caches between runs so every run computes from scratch
        clearcaches(cl)
    def d():
        len(cl.headrevs())
    timer(d, setup=s)
    fm.end()
548 548
@command(b'perftags', formatteropts+
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perftags(ui, repo, **opts):
    """benchmark computing the repository's tags"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def s():
        # with --clear-revlogs, changelog/manifest reading is measured too
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()
    def t():
        return len(repo.tags())
    timer(t, setup=s)
    fm.end()
567 567
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating over all ancestors of the changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        # exhaust the ancestors iterator; iteration itself is the workload
        for a in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()
578 578
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark ancestor-membership tests of REVSET revs against all heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        # the membership tests are the measured work; results are discarded
        for rev in revs:
            rev in s
    timer(d)
    fm.end()
591 591
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    repos = [repo, None]
    # keyword arguments arrive with native-str keys on Python 3; convert
    # them like every other perf command so gettimer/formatter option
    # lookups (which use bytes keys) behave consistently
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # re-create the peer before every run: discovery state is
        # per-connection, so each run must start from a fresh peer
        repos[1] = hg.peer(ui, opts, path)
    def d():
        setdiscovery.findcommonheads(ui, *repos)
    timer(d, setup=s)
    fm.end()
606 606
@command(b'perfbookmarks', formatteropts +
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def s():
        # with --clear-revlogs, changelog reading is part of the measurement
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')
    def d():
        # plain attribute access triggers the (re)parse being benchmarked
        repo._bookmarks
    timer(d, setup=s)
    fm.end()
625 625
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # NOTE(review): open() is called with a bytes mode (b'rb') throughout;
    # Python 3's builtin open() requires a str mode — confirm whether this
    # path is exercised under py3 or shadowed by a compat wrapper.

    def makebench(fn):
        # benchmark: parse the bundle header then run fn on the bundle object
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # benchmark: consume the whole bundle payload in size-byte reads
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # baseline: raw file reads without any bundle parsing
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # sniff the bundle type once to pick the applicable benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
743 743
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # exhaust the chunk generator: generation, not storage, is measured
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
774 774
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark rebuilding the dirstate directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate so only the _dirs computation is measured
    b'a' in dirstate
    def d():
        dirstate.hasdir(b'a')
        # drop the cached directory map so the next run rebuilds it
        del dirstate._map._dirs
    timer(d)
    fm.end()
786 786
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark loading the dirstate from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime once outside the loop
    b"a" in repo.dirstate
    def d():
        # invalidate so every run re-reads and re-parses the dirstate file
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(d)
    fm.end()
797 797
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark directory-map lookups with a cold _dirs cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate
    def d():
        repo.dirstate.hasdir(b"a")
        # drop the cached directory map so the next run recomputes it
        del repo.dirstate._map._dirs
    timer(d)
    fm.end()
808 808
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark building the dirstate file-name fold map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate._map.filefoldmap.get(b'a')
        # drop the cached map so the next run rebuilds it
        del dirstate._map.filefoldmap
    timer(d)
    fm.end()
820 820
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark building the dirstate directory fold map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate._map.dirfoldmap.get(b'a')
        # drop both cached maps (_dirs feeds dirfoldmap) for a cold rebuild
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(d)
    fm.end()
833 833
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark writing the dirstate back to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds
    def d():
        # mark dirty so write() actually serializes instead of returning early
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()
845 845
@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    """benchmark the merge-action calculation for merging REV into the wdir"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()
864 864
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def d():
        # full copy tracing between the two revisions is the measured work
        copies.pathcopies(ctx1, ctx2)
    timer(d)
    fm.end()
876 876
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
         ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def d():
        phases = _phases
        if full:
            # with --full, also measure re-reading the phase data from disk
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()
895 895
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    # count how many advertised phase roots are known locally, for context
    # in the printed report
    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        # the summary computation is the operation performed on every push
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
951 951
@command(b'perfmanifest',[
            (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
            (b'', b'clear-disk', False, b'clear on-disk caches too'),
         ] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset: benchmark that changeset's manifest
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full 40-char hex node of a manifest revision
            t = bin(rev)
        else:
            # otherwise REV must be an integer manifest revision number
            try:
                rev = int(rev)

                # modern hg exposes getstorage(); older only has _revlog
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        # drop in-memory (and optionally on-disk) caches so the read is cold
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
987 987
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading the changelog entry for a single revision"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def readentry():
        repo.changelog.read(node)

    timer(readentry)
    fm.end()
998 998
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def resetignore():
        # drop dirstate content and the cached ignore matcher between runs
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def loadignore():
        dirstate._ignore

    timer(loadignore, setup=resetignore, title=b"load")
    fm.end()
1015 1015
@command(b'perfindex', [
            (b'', b'rev', [], b'revision to be looked up (default tip)'),
            (b'', b'no-lookup', None, b'do not revision lookup post creation'),
         ] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # fix: after _byteskwargs() the opts keys are bytes, so the previous
        # str key opts['rev'] raised KeyError on Python 3; the Abort message
        # is also bytes now, matching the rest of the file.
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        # timed work: build a fresh changelog, then look every node up
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)
    timer(d, setup=setup)
    fm.end()
1068 1069
@command(b'perfnodemap', [
            (b'', b'rev', [], b'revision to be looked up (default tip)'),
         ] + formatteropts)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can vary. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        # fix: Abort message must be bytes for Python 3 compatibility,
        # matching the b'' literals used everywhere else in this file
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]
    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    # setnodeget is passed directly as the setup hook (the old extra
    # `setup()` wrapper added nothing)
    timer(d, setup=setnodeget)
    fm.end()
1121
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup time of a bare `hg version` invocation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def spawn():
        if os.name == r'nt':
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
        else:
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))

    timer(spawn)
    fm.end()
1082 1135
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark looking up the parents of many changelog nodes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodes = [repo.changelog.node(i) for i in _xrange(count)]

    def lookupparents():
        for node in nodes:
            repo.changelog.parents(node)

    timer(lookupparents)
    fm.end()
1099 1152
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of a single changectx"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)

    def filecount():
        len(repo[x].files())

    timer(filecount)
    fm.end()
1109 1162
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw file list straight from the changelog"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def rawfilecount():
        # index 3 of a parsed changelog entry is the files list
        len(cl.read(x)[3])

    timer(rawfilecount)
    fm.end()
1120 1173
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def lookup():
        return len(repo.lookup(rev))

    timer(lookup)
    fm.end()
1127 1180
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
         ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark applying a long series of random edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: every invocation replays the exact same edit sequence
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        # replace lines [a1, a2) of the current text with [b1, b2) of rev
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        # timed work: replay all precomputed edits on a fresh linelog
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1161 1214
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # hoist the attribute lookup out of the timed call
    revrange = scmutil.revrange

    def resolve():
        return len(revrange(repo, specs))

    timer(resolve)
    fm.end()
1169 1222
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark repeated cold node->rev lookups on the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    node = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")

    def lookup():
        cl.rev(node)
        # wipe caches so every iteration is a cold lookup
        clearcaches(cl)

    timer(lookup)
    fm.end()
1183 1236
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running `hg log`, optionally following renames"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # swallow the log output so printing cost stays out of the picture
    ui.pushbuffer()

    def runlog():
        commands.log(ui, repo, rev=rev, date=b'', user=b'',
                     copies=opts.get(b'rename'))

    timer(runlog)
    ui.popbuffer()
    fm.end()
1197 1250
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def walkback():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[rev]
            ctx.branch() # read changelog data (in addition to the index)

    timer(walkback)
    fm.end()
1212 1265
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render into a ui writing to /dev/null so output cost is constant
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev') or [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def render():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(render)
    fm.end()
1246 1299
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
         ])
def perfhelperpathcopies(ui, repo, revs=None, **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    # fix: the default used to be the mutable literal `revs=[]`; use None as
    # the sentinel (the `if not revs` branch below handles both)
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # every merge revision gives (base, parent) pairs worth tracing
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                # NOTE(review): this dict mixes bytes keys (b'source', ...)
                # with the str keys referenced by the `output` format string
                # and the str keys added below ('time', 'nbrenamedfiles');
                # that only lines up on Python 2 — confirm before running
                # this command under Python 3.
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
1322 1375
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case collision auditor"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def makeauditor():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(makeauditor)
    fm.end()
1329 1382
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    def load():
        store.fncache._load()

    timer(load)
    fm.end()
1339 1392
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache file inside a transaction"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    lock = repo.lock()
    store.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')

    def write():
        # force the dirty flag so every run actually rewrites the file
        store.fncache._dirty = True
        store.fncache.write(tr)

    timer(write)
    tr.close()
    lock.release()
    fm.end()
1356 1409
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def encodeall():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encodeall)
    fm.end()
1368 1421
def _bdiffworker(q, blocks, xdiff, ready, done):
    # Worker loop for threaded perfbdiff runs: pull text pairs from *q* and
    # diff each one until a None sentinel is seen, then park on *ready* until
    # the coordinator wakes all workers for the next timed run (or sets
    # *done* to shut the pool down). *blocks*/*xdiff* select the diff flavor.
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()
1384 1437
def _manifestrevision(repo, mnode):
    """Return the raw stored text of manifest node *mnode*."""
    mlog = repo.manifestlog
    # modern hg exposes getstorage(); older versions only have _revlog
    if util.safehasattr(mlog, b'getstorage'):
        return mlog.getstorage(b'').revision(mnode)
    return mlog._revlog.revision(mnode)
1394 1447
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with -c/-m the first positional argument is actually the revision
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # gather all (old, new) text pairs up front so only diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        # start a worker pool; the initial None sentinels and q.join() make
        # sure every worker is parked on `ready` before the first timed run
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            # feed all pairs plus one stop sentinel per worker, wake the
            # pool, then wait for the whole batch to be consumed
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # shut the worker pool down: set the done flag, unblock the final
        # q.get() of each worker, and wake anyone parked on `ready`
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1495 1548
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with -c/-m the first positional argument is actually the revision
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # gather all (left, right) text pairs up front so only diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1561 1614
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        # build the diff keyword arguments for this whitespace combination
        # (a separate name avoids clobbering the outer `opts` mapping)
        diffargs = dict((options[c], b'1') for c in diffopt)

        def d(diffargs=diffargs):
            ui.pushbuffer()
            commands.diff(ui, repo, **diffargs)
            ui.popbuffer()

        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()
1583 1636
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # the first 4 bytes carry the flags (high 16 bits) and version (low 16)
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    # sample nodes at fixed fractions of the index for the lookup benches
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    # each entry is (callable, benchmark title); every one is timed below
    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1701 1754
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    if startrev < 0:
        startrev = rllen + startrev

    def readall():
        rl.clearcaches()

        first = startrev
        last = rllen
        step = opts[b'dist']

        if reverse:
            first, last = last - 1, first - 1
            step = -1 * step

        for rev in _xrange(first, last, step):
            # Old revisions don't support passing int.
            node = rl.node(rev)
            rl.revision(node)

    timer, fm = gettimer(ui, opts)
    timer(readall)
    fm.end()
1743 1796
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          # BUG FIX: help text was a copy-paste of --stoprev's
          # ("last revision to write"); --count is the number of runs.
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
         ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # BUG FIX: message read "invalide run count"
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    # every run visits the same revisions in the same order, so results can
    # be zipped positionally; the assert guards that invariant
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # BUG FIX: was `resultcount * 70 // 100`, which reported the 70th
        # percentile under the "50%" label
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
1851 1904
1852 1905 class _faketr(object):
1853 1906 def add(s, x, y, z=None):
1854 1907 return None
1855 1908
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    """Time re-adding each revision of ``orig[startrev:stoprev]`` to a
    temporary copy of the revlog.

    Returns a list of ``(rev, timing)`` pairs, one per revision added.
    ``source`` selects how revision data is fed (see perfrevlogwrite);
    ``runidx`` only labels the progress bar when doing multiple runs.
    """
    timings = []
    # addrawrevision requires a transaction, but we do not want any
    # journalling overhead in the measurement, so use a no-op fake
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            def updateprogress(pos):
                progress.update(pos)
            def completeprogress():
                progress.complete()
        else:
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            # only the addrawrevision call itself is timed; r[0] is filled
            # in by the timeone() context manager on exit
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
1892 1945
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``(args, kwargs)`` to pass to ``addrawrevision`` for ``rev``.

    Depending on ``source``, the content is provided either as a full text
    or as a cached delta against a chosen base revision (``cachedelta``).
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second parent
        parent = p2
        if p2 == nullid:
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # compute both parent deltas and keep the shorter one
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        # reuse whatever delta base the original revlog actually stored
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1931 1984
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a writable copy of ``orig`` truncated to ``truncaterev``.

    The copy lives in a temporary directory removed on exit, so benchmarks
    can re-add revisions without touching the real repository data.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # index entries are fixed-size (orig._io.size bytes each)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
1978 2031
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # default to every available engine that supports revlog
        # compression (probed by compressing a dummy payload)
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # open a raw file handle on the revlog's backing file; inline
        # revlogs store data in the index file
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    # compression benches rely on chunks[0] being populated by dochunkbatch
    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2096 2149
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the first positional argument is actually the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # slice each on-disk segment back into per-revision raw chunks
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with the data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        # note: iterates slicedchain from the enclosing scope, not `chain`
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved between modules across Mercurial versions
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute the inputs each phase needs so the timed closures only
    # perform the work of their own phase
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2232 2285
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building
    the volatile revisions set cache on revset execution. The volatile
    cache holds data related to filtered revisions and obsolescence."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            # force full changectx creation for each matched revision
            for ctx in repo.set(expr): pass
        else:
            for r in repo.revs(expr): pass
    timer(d)
    fm.end()
2255 2308
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # use the unfiltered repo so computations see every revision
    repo = repo.unfiltered()

    def getobs(name):
        # benchmark closure for one obsolescence set
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)
        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        # restrict to the sets explicitly requested on the command line
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        # benchmark closure for one repoview filter
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)
        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()
2297 2350
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
          ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            # with --full, drop every cached branchmap so the whole chain
            # is rebuilt; otherwise drop only this filter's entry
            if full:
                view._branchcaches.clear()
            else:
                view._branchcaches.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap read/write so only the in-memory update
    # itself is timed
    branchcacheread = safeattrsetter(branchmap, b'read')
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcacheread.set(lambda repo: None)
    branchcachewrite.set(lambda bc, repo: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2366 2419
@command(b'perfbranchmapupdate', [
     (b'', b'base', [], b'subset of revision to start from'),
     (b'', b'target', [], b'subset of revision to end with'),
     (b'', b'clear-caches', False, b'clear cache between each runs')
    ] + formatteropts)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    $ update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None] # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the benchmark measures updating the branchmap with exactly these revs
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # temporarily register repoview filters exposing exactly the base
        # and target revision sets computed above
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset where found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # each run starts from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2470 2523
@command(b'perfbranchmapload', [
     (b'f', b'filter', b'', b'Specify repoview filter'),
     # BUG FIX: help text read "brachmap"
     (b'', b'list', False, b'List branchmap filter caches'),
     (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),

    ] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    # NOTE: `filter` and `list` mirror the CLI option names and therefore
    # shadow the builtins of the same name inside this function.
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # --list only reports cached branchmap files and their sizes
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap() # make sure we have a relevant, up to date branchmap

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while branchmap.read(repo) is None:
        # fall back to the nearest subset filter that has an on-disk cache
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        branchmap.read(repo)
    timer(bench, setup=setup)
    fm.end()
2515 2568
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    svfs = getsvfs(repo)

    def loadmarkers():
        # instantiating the obsstore parses the on-disk markers; len()
        # forces the marker list to be fully materialized
        return len(obsolete.obsstore(svfs))

    timer, fm = gettimer(ui)
    timer(loadmarkers)
    fm.end()
2525 2578
@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
     (b'', b'mincost', 0, b'smallest cost of items in cache'),
     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
     (b'', b'size', 4, b'size of cache'),
     (b'', b'gets', 10000, b'number of key lookups'),
     (b'', b'sets', 10000, b'number of key sets'),
     (b'', b'mixed', 10000, b'number of mixed mode operations'),
     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    # Benchmark util.lrucachedict: init, raw gets, inserts/sets and a
    # mixed get/set workload; when --costlimit is set, the cost-aware
    # variants are exercised instead.
    opts = _byteskwargs(opts)

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    # keys used to pre-fill the cache for the get benchmarks
    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        # cost-limited cache may evict, so lookups can miss
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        # op 0 is a get, op 1 is a set; --mixedgetfreq sets the get ratio
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2656 2709
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def bench():
        # emit the same line 100k times through the ui layer; ui.write is
        # deliberately looked up each iteration since it is what we measure
        for _ in range(100000):
            ui.write((b'Testing write performance\n'))

    timer(bench)
    fm.end()
2669 2722
def uisetup(ui):
    """Extension setup hook: install compatibility shims for old Mercurial."""
    if (util.safehasattr(cmdutil, b'openrevlog') and
        not util.safehasattr(commands, b'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            # reject --dir explicitly on versions whose repo lacks dirlog
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(b"This version doesn't support --dir option",
                                  hint=b"use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2684 2737
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def bench():
        # drive a progress bar from 0 to `total`, one increment at a time
        with ui.makeprogress(topic, total=total) as progress:
            for _ in pycompat.xrange(total):
                progress.increment()

    timer(bench)
    fm.end()
@@ -1,300 +1,302
1 1 #require test-repo
2 2
3 3 Set vars:
4 4
5 5 $ . "$TESTDIR/helpers-testrepo.sh"
6 6 $ CONTRIBDIR="$TESTDIR/../contrib"
7 7
8 8 Prepare repo:
9 9
10 10 $ hg init
11 11
12 12 $ echo this is file a > a
13 13 $ hg add a
14 14 $ hg commit -m first
15 15
16 16 $ echo adding to file a >> a
17 17 $ hg commit -m second
18 18
19 19 $ echo adding more to file a >> a
20 20 $ hg commit -m third
21 21
22 22 $ hg up -r 0
23 23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 24 $ echo merge-this >> a
25 25 $ hg commit -m merge-able
26 26 created new head
27 27
28 28 $ hg up -r 2
29 29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30 30
31 31 perfstatus
32 32
33 33 $ cat >> $HGRCPATH << EOF
34 34 > [extensions]
35 35 > perfstatusext=$CONTRIBDIR/perf.py
36 36 > [perf]
37 37 > presleep=0
38 38 > stub=on
39 39 > parentscount=1
40 40 > EOF
41 41 $ hg help perfstatusext
42 42 perfstatusext extension - helper extension to measure performance
43 43
44 44 list of commands:
45 45
46 46 perfaddremove
47 47 (no help text available)
48 48 perfancestors
49 49 (no help text available)
50 50 perfancestorset
51 51 (no help text available)
52 52 perfannotate (no help text available)
53 53 perfbdiff benchmark a bdiff between revisions
54 54 perfbookmarks
55 55 benchmark parsing bookmarks from disk to memory
56 56 perfbranchmap
57 57 benchmark the update of a branchmap
58 58 perfbranchmapload
59 59 benchmark reading the branchmap
60 60 perfbranchmapupdate
61 61 benchmark branchmap update from for <base> revs to <target>
62 62 revs
63 63 perfbundleread
64 64 Benchmark reading of bundle files.
65 65 perfcca (no help text available)
66 66 perfchangegroupchangelog
67 67 Benchmark producing a changelog group for a changegroup.
68 68 perfchangeset
69 69 (no help text available)
70 70 perfctxfiles (no help text available)
71 71 perfdiffwd Profile diff of working directory changes
72 72 perfdirfoldmap
73 73 (no help text available)
74 74 perfdirs (no help text available)
75 75 perfdirstate (no help text available)
76 76 perfdirstatedirs
77 77 (no help text available)
78 78 perfdirstatefoldmap
79 79 (no help text available)
80 80 perfdirstatewrite
81 81 (no help text available)
82 82 perfdiscovery
83 83 benchmark discovery between local repo and the peer at given
84 84 path
85 85 perffncacheencode
86 86 (no help text available)
87 87 perffncacheload
88 88 (no help text available)
89 89 perffncachewrite
90 90 (no help text available)
91 91 perfheads benchmark the computation of a changelog heads
92 92 perfhelper-pathcopies
93 93 find statistic about potential parameters for the
94 94 'perftracecopies'
95 95 perfignore benchmark operation related to computing ignore
96 96 perfindex benchmark index creation time followed by a lookup
97 97 perflinelogedits
98 98 (no help text available)
99 99 perfloadmarkers
100 100 benchmark the time to parse the on-disk markers for a repo
101 101 perflog (no help text available)
102 102 perflookup (no help text available)
103 103 perflrucachedict
104 104 (no help text available)
105 105 perfmanifest benchmark the time to read a manifest from disk and return a
106 106 usable
107 107 perfmergecalculate
108 108 (no help text available)
109 109 perfmoonwalk benchmark walking the changelog backwards
110 110 perfnodelookup
111 111 (no help text available)
112 perfnodemap benchmark the time necessary to look up revision from a cold
113 nodemap
112 114 perfparents (no help text available)
113 115 perfpathcopies
114 116 benchmark the copy tracing logic
115 117 perfphases benchmark phasesets computation
116 118 perfphasesremote
117 119 benchmark time needed to analyse phases of the remote server
118 120 perfprogress printing of progress bars
119 121 perfrawfiles (no help text available)
120 122 perfrevlogchunks
121 123 Benchmark operations on revlog chunks.
122 124 perfrevlogindex
123 125 Benchmark operations against a revlog index.
124 126 perfrevlogrevision
125 127 Benchmark obtaining a revlog revision.
126 128 perfrevlogrevisions
127 129 Benchmark reading a series of revisions from a revlog.
128 130 perfrevlogwrite
129 131 Benchmark writing a series of revisions to a revlog.
130 132 perfrevrange (no help text available)
131 133 perfrevset benchmark the execution time of a revset
132 134 perfstartup (no help text available)
133 135 perfstatus (no help text available)
134 136 perftags (no help text available)
135 137 perftemplating
136 138 test the rendering time of a given template
137 139 perfunidiff benchmark a unified diff between revisions
138 140 perfvolatilesets
139 141 benchmark the computation of various volatile set
140 142 perfwalk (no help text available)
141 143 perfwrite microbenchmark ui.write
142 144
143 145 (use 'hg help -v perfstatusext' to show built-in aliases and global options)
144 146 $ hg perfaddremove
145 147 $ hg perfancestors
146 148 $ hg perfancestorset 2
147 149 $ hg perfannotate a
148 150 $ hg perfbdiff -c 1
149 151 $ hg perfbdiff --alldata 1
150 152 $ hg perfunidiff -c 1
151 153 $ hg perfunidiff --alldata 1
152 154 $ hg perfbookmarks
153 155 $ hg perfbranchmap
154 156 $ hg perfbranchmapload
155 157 $ hg perfbranchmapupdate --base "not tip" --target "tip"
156 158 benchmark of branchmap with 3 revisions with 1 new ones
157 159 $ hg perfcca
158 160 $ hg perfchangegroupchangelog
159 161 $ hg perfchangegroupchangelog --cgversion 01
160 162 $ hg perfchangeset 2
161 163 $ hg perfctxfiles 2
162 164 $ hg perfdiffwd
163 165 $ hg perfdirfoldmap
164 166 $ hg perfdirs
165 167 $ hg perfdirstate
166 168 $ hg perfdirstatedirs
167 169 $ hg perfdirstatefoldmap
168 170 $ hg perfdirstatewrite
169 171 #if repofncache
170 172 $ hg perffncacheencode
171 173 $ hg perffncacheload
172 174 $ hg debugrebuildfncache
173 175 fncache already up to date
174 176 $ hg perffncachewrite
175 177 $ hg debugrebuildfncache
176 178 fncache already up to date
177 179 #endif
178 180 $ hg perfheads
179 181 $ hg perfignore
180 182 $ hg perfindex
181 183 $ hg perflinelogedits -n 1
182 184 $ hg perfloadmarkers
183 185 $ hg perflog
184 186 $ hg perflookup 2
185 187 $ hg perflrucache
186 188 $ hg perfmanifest 2
187 189 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
188 190 $ hg perfmanifest -m 44fe2c8352bb
189 191 abort: manifest revision must be integer or full node
190 192 [255]
191 193 $ hg perfmergecalculate -r 3
192 194 $ hg perfmoonwalk
193 195 $ hg perfnodelookup 2
194 196 $ hg perfpathcopies 1 2
195 197 $ hg perfprogress --total 1000
196 198 $ hg perfrawfiles 2
197 199 $ hg perfrevlogindex -c
198 200 #if reporevlogstore
199 201 $ hg perfrevlogrevisions .hg/store/data/a.i
200 202 #endif
201 203 $ hg perfrevlogrevision -m 0
202 204 $ hg perfrevlogchunks -c
203 205 $ hg perfrevrange
204 206 $ hg perfrevset 'all()'
205 207 $ hg perfstartup
206 208 $ hg perfstatus
207 209 $ hg perftags
208 210 $ hg perftemplating
209 211 $ hg perfvolatilesets
210 212 $ hg perfwalk
211 213 $ hg perfparents
212 214 $ hg perfdiscovery -q .
213 215
214 216 test actual output
215 217 ------------------
216 218
217 219 normal output:
218 220
219 221 $ hg perfheads --config perf.stub=no
220 222 ! wall * comb * user * sys * (best of *) (glob)
221 223
222 224 detailed output:
223 225
224 226 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
225 227 ! wall * comb * user * sys * (best of *) (glob)
226 228 ! wall * comb * user * sys * (max of *) (glob)
227 229 ! wall * comb * user * sys * (avg of *) (glob)
228 230 ! wall * comb * user * sys * (median of *) (glob)
229 231
230 232 test json output
231 233 ----------------
232 234
233 235 normal output:
234 236
235 237 $ hg perfheads --template json --config perf.stub=no
236 238 [
237 239 {
238 240 "comb": *, (glob)
239 241 "count": *, (glob)
240 242 "sys": *, (glob)
241 243 "user": *, (glob)
242 244 "wall": * (glob)
243 245 }
244 246 ]
245 247
246 248 detailed output:
247 249
248 250 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
249 251 [
250 252 {
251 253 "avg.comb": *, (glob)
252 254 "avg.count": *, (glob)
253 255 "avg.sys": *, (glob)
254 256 "avg.user": *, (glob)
255 257 "avg.wall": *, (glob)
256 258 "comb": *, (glob)
257 259 "count": *, (glob)
258 260 "max.comb": *, (glob)
259 261 "max.count": *, (glob)
260 262 "max.sys": *, (glob)
261 263 "max.user": *, (glob)
262 264 "max.wall": *, (glob)
263 265 "median.comb": *, (glob)
264 266 "median.count": *, (glob)
265 267 "median.sys": *, (glob)
266 268 "median.user": *, (glob)
267 269 "median.wall": *, (glob)
268 270 "sys": *, (glob)
269 271 "user": *, (glob)
270 272 "wall": * (glob)
271 273 }
272 274 ]
273 275
274 276 Check perf.py for historical portability
275 277 ----------------------------------------
276 278
277 279 $ cd "$TESTDIR/.."
278 280
279 281 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
280 282 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
281 283 > "$TESTDIR"/check-perf-code.py contrib/perf.py
282 284 contrib/perf.py:\d+: (re)
283 285 > from mercurial import (
284 286 import newer module separately in try clause for early Mercurial
285 287 contrib/perf.py:\d+: (re)
286 288 > from mercurial import (
287 289 import newer module separately in try clause for early Mercurial
288 290 contrib/perf.py:\d+: (re)
289 291 > origindexpath = orig.opener.join(orig.indexfile)
290 292 use getvfs()/getsvfs() for early Mercurial
291 293 contrib/perf.py:\d+: (re)
292 294 > origdatapath = orig.opener.join(orig.datafile)
293 295 use getvfs()/getsvfs() for early Mercurial
294 296 contrib/perf.py:\d+: (re)
295 297 > vfs = vfsmod.vfs(tmpdir)
296 298 use getvfs()/getsvfs() for early Mercurial
297 299 contrib/perf.py:\d+: (re)
298 300 > vfs.options = getattr(orig.opener, 'options', None)
299 301 use getvfs()/getsvfs() for early Mercurial
300 302 [1]
General Comments 0
You need to be logged in to leave comments. Login now