##// END OF EJS Templates
perf: add some documentation to perfindex...
Boris Feld -
r41482:d65ba1ff default
parent child Browse files
Show More
@@ -1,2675 +1,2683
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance'''
3 3
4 4 # "historical portability" policy of perf.py:
5 5 #
6 6 # We have to do:
7 7 # - make perf.py "loadable" with as wide Mercurial version as possible
8 8 # This doesn't mean that perf commands work correctly with that Mercurial.
9 9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
10 10 # - make historical perf command work correctly with as wide Mercurial
11 11 # version as possible
12 12 #
13 13 # We have to do, if possible with reasonable cost:
14 14 # - make recent perf command for historical feature work correctly
15 15 # with early Mercurial
16 16 #
17 17 # We don't have to do:
18 18 # - make perf command for recent feature work correctly with early
19 19 # Mercurial
20 20
21 21 from __future__ import absolute_import
22 22 import contextlib
23 23 import functools
24 24 import gc
25 25 import os
26 26 import random
27 27 import shutil
28 28 import struct
29 29 import sys
30 30 import tempfile
31 31 import threading
32 32 import time
33 33 from mercurial import (
34 34 changegroup,
35 35 cmdutil,
36 36 commands,
37 37 copies,
38 38 error,
39 39 extensions,
40 40 hg,
41 41 mdiff,
42 42 merge,
43 43 revlog,
44 44 util,
45 45 )
46 46
47 47 # for "historical portability":
48 48 # try to import modules separately (in dict order), and ignore
49 49 # failure, because these aren't available with early Mercurial
50 50 try:
51 51 from mercurial import branchmap # since 2.5 (or bcee63733aad)
52 52 except ImportError:
53 53 pass
54 54 try:
55 55 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
56 56 except ImportError:
57 57 pass
58 58 try:
59 59 from mercurial import registrar # since 3.7 (or 37d50250b696)
60 60 dir(registrar) # forcibly load it
61 61 except ImportError:
62 62 registrar = None
63 63 try:
64 64 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
65 65 except ImportError:
66 66 pass
67 67 try:
68 68 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
69 69 except ImportError:
70 70 pass
71 71 try:
72 72 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
73 73 except ImportError:
74 74 pass
75 75
76 76
def identity(a):
    """Return *a* unchanged; no-op fallback for missing pycompat helpers."""
    return a
79 79
# for "historical portability":
# build py2/py3 and old-Mercurial compatibility aliases; each fallback
# matches the oldest supported behaviour
try:
    from mercurial import pycompat
    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
    _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (ImportError, AttributeError):
    # NOTE(review): this branch only works on py2 (xrange/maxint); py3
    # always ships pycompat, so that appears intentional — confirm
    import inspect
    getargspec = inspect.getargspec
    _byteskwargs = identity
    fsencode = identity # no py3 support
    _maxint = sys.maxint # no py3 support
    _sysstr = lambda x: x # no py3 support
    _xrange = xrange

try:
    # 4.7+
    queue = pycompat.queue.Queue
except (AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (AttributeError, ImportError):
        queue = util.queue

# templater factory moved from cmdutil to logcmdutil over time; None when
# the running Mercurial predates both locations
try:
    from mercurial import logcmdutil
    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None
118 118
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()
def safehasattr(thing, attr):
    """Return True if 'thing' has attribute 'attr' (bytes or str).

    Uses a private sentinel so attributes whose value is falsy
    (None, 0, b'') are still reported as present.
    """
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    # NOTE(review): os.name is a native str, so this bytes comparison can
    # only match on py2; py3 always has time.perf_counter anyway — confirm
    util.timer = time.clock
else:
    util.timer = time.time

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
    (b'c', b'changelog', False, (b'open changelog')),
    (b'm', b'manifest', False, (b'open manifest')),
    (b'', b'dir', False, (b'open directory manifest')),
    ]))

# command name -> entry mapping, populated by the @command decorator below
cmdtable = {}
158 158
# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a b'name|alias1|alias2' command spec into its list of names."""
    return cmd.split(b"|")

# pick the newest available way to register commands, falling back through
# progressively older Mercurial APIs
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            # cmdtable entries are (func, options[, synopsis]) tuples
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
192 192
# declare the config items this extension reads; dynamicdefault keeps the
# literal defaults in the reading code.  Registration is best-effort because
# registrar.configitem does not exist in older Mercurial versions.
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    pass
212 212
def getlen(ui):
    """Return the length function used on timed subjects.

    With the experimental perf.stub config set, every subject reports
    length 1 so test output stays stable.
    """
    stubbed = ui.configbool(b"perf", b"stub", False)
    return (lambda x: 1) if stubbed else len
217 217
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                # falsy so callers can detect "not a real formatter"
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)
    return functools.partial(_timer, fm, displayall=displayall), fm
283 283
def stub_timer(fm, func, setup=None, title=None):
    """Degenerate timer used with perf.stub: run the optional setup,
    then run func a single time without measuring anything."""
    if setup:
        setup()
    func()
288 288
@contextlib.contextmanager
def timeone():
    # Measure one execution of the with-block body.  Yields a list that,
    # once the block exits, contains a single (wallclock, user-cpu,
    # system-cpu) tuple; os.times() supplies the CPU components.
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
299 299
def _timer(fm, func, setup=None, title=None, displayall=False):
    """Repeatedly run 'func' (with optional per-run 'setup') and report
    timings through formatter 'fm'.

    Stop heuristic: at least 100 runs unless 3 wall-clock seconds have
    elapsed, and never more than ~10 seconds (with a minimum of 3 runs).
    """
    # collect garbage up front so earlier allocations don't pollute timings
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    while True:
        if setup is not None:
            setup()
        with timeone() as item:
            r = func()
        count += 1
        results.append(item[0])
        cstop = util.timer()
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break

    # r is the last run's return value; shown as "result" when truthy
    formatone(fm, results, title=title, result=r,
              displayall=displayall)
320 320
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timings through formatter 'fm'.

    'timings' is a list of (wall, user, sys) tuples; it is sorted in
    place and the best entry is always reported.  With 'displayall',
    the max, average and median entries are reported as well.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def _show(role, entry):
        # the "best" entry keeps bare field names for output compatibility
        prefix = b'' if role == b'best' else b'%s.' % role
        wall, user, system = entry[0], entry[1], entry[2]
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', wall)
        fm.write(prefix + b'comb', b' comb %f', user + system)
        fm.write(prefix + b'user', b' user %f', user)
        fm.write(prefix + b'sys', b' sys %f', system)
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    _show(b'best', timings[0])
    if displayall:
        _show(b'max', timings[-1])
        _show(b'avg', tuple(sum(col) / count for col in zip(*timings)))
        _show(b'median', timings[len(timings) // 2])
352 352
# utilities for historical portability

def getint(ui, section, name, default):
    """Read config 'section.name' as an integer, returning 'default'
    when unset.

    for "historical portability": ui.configint has only been available
    since 1.9 (or fa2b596db182), so parse the raw value ourselves.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, raw))
366 366
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    # capture the current value now so restore() can put it back later
    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
396 396
# utilities to examine each internal API changes

def getbranchmapsubsettable():
    """Return the repoview filter -> subset mapping, wherever it lives.

    for "historical portability":
    subsettable is defined in:
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    """
    for mod in (branchmap, repoview):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
414 414
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store

    for "historical portability": repo.svfs has only been available
    since 2.3 (or 7034365089bf); fall back to the older repo.sopener.
    """
    storevfs = getattr(repo, 'svfs', None)
    return storevfs if storevfs else getattr(repo, 'sopener')
425 425
def getvfs(repo):
    """Return appropriate object to access files under .hg

    for "historical portability": repo.vfs has only been available
    since 2.3 (or 7034365089bf); fall back to the older repo.opener.
    """
    repovfs = getattr(repo, 'vfs', None)
    return repovfs if repovfs else getattr(repo, 'opener')
436 436
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            # NOTE(review): vars() keys are native str on py3, so this
            # bytes membership test / bytes __dict__ key look py2-only —
            # confirm behaviour under py3
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
465 465
# utilities to clear cache

def clearfilecache(obj, attrname):
    """Drop a @filecache'd property 'attrname' from 'obj'.

    Operates on the unfiltered repo when 'obj' is a repoview, removing
    both the materialized attribute and the filecache bookkeeping entry.
    """
    getunfiltered = getattr(obj, 'unfiltered', None)
    if getunfiltered is not None:
        obj = getunfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
475 475
def clearchangelog(repo):
    # Force the changelog to be re-read: reset the repoview's changelog
    # cache keys (object.__setattr__ bypasses the repoview attribute
    # proxying), then drop the unfiltered repo's filecache entry.
    if repo is not repo.unfiltered():
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
481 481
# perf commands

@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the working directory via dirstate.walk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    # list() forces the full walk; len() keeps a cheap observable result
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()
492 492
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file 'f' at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    filectx = repo[b'.'][f]
    timer(lambda: len(filectx.annotate(True)))
    fm.end()
500 500
@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    """benchmark computing the working copy status"""
    opts = _byteskwargs(opts)
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    # sum of per-category lengths gives a cheap scalar result to report
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
512 512
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the whole working copy"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # capture the original quietness *before* entering the try block: if
    # this lookup were inside the try and failed, the finally clause would
    # raise NameError on 'oldquiet' and mask the original error
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        # dry_run so the benchmark never mutates the repository
        opts[b'dry_run'] = True
        timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
526 526
def clearcaches(cl):
    """Reset a revlog's (changelog's) lookup caches before a timed run."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # pre-clearcaches() API: reinitialize the node->rev cache by hand
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
535 535
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def reset():
        # drop lookup caches so every run starts cold
        clearcaches(cl)
    def run():
        len(cl.headrevs())
    timer(run, setup=reset)
    fm.end()
548 548
@command(b'perftags', formatteropts+
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perftags(ui, repo, **opts):
    """benchmark computing the repository tags"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def s():
        # with --clear-revlogs, also time re-reading changelog/manifest
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()
    def t():
        return len(repo.tags())
    timer(t, setup=s)
    fm.end()
567 567
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating over all ancestors of the current heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def walkall():
        for ancestor in repo.changelog.ancestors(heads):
            pass
    timer(walkall)
    fm.end()
578 578
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests of REVSET revs in a lazy ancestor set"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def probe():
        ancestorset = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in ancestorset
    timer(probe)
    fm.end()
591 591
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # normalize **opts keys to bytes like every other perf command; without
    # this, gettimer() looks up bytes config keys in a str-keyed dict on py3
    opts = _byteskwargs(opts)
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # create a fresh peer for every run so discovery state doesn't leak
        # between iterations
        repos[1] = hg.peer(ui, opts, path)
    def d():
        setdiscovery.findcommonheads(ui, *repos)
    timer(d, setup=s)
    fm.end()
606 606
@command(b'perfbookmarks', formatteropts +
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def s():
        if clearrevlogs:
            clearchangelog(repo)
        # drop the cached property so each run re-reads the bookmarks file
        clearfilecache(repo, b'_bookmarks')
    def d():
        repo._bookmarks
    timer(d, setup=s)
    fm.end()
625 625
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # each maker returns a zero-argument callable that reopens the bundle
    # from scratch, so every timed run includes the full read path
    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # read the decoded bundle stream in 'size'-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # baseline: raw file reads with no bundle decoding at all
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # open once up front just to detect the bundle format
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
743 743
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # consume the generator fully; the chunks themselves are discarded
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
774 774
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark the computation of the dirstate's directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # the containment check forces the dirstate to be loaded up front
    b'a' in dirstate
    def d():
        dirstate.hasdir(b'a')
        # drop the cached directory structure so the next run recomputes it
        del dirstate._map._dirs
    timer(d)
    fm.end()
786 786
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark loading the dirstate from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime once so setup costs are not part of the first measurement
    b"a" in repo.dirstate
    def d():
        # invalidate() forces a full re-read on the next access
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(d)
    fm.end()
797 797
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark rebuilding the dirstate's directory cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # force the dirstate to be loaded before timing starts
    b"a" in repo.dirstate
    def d():
        repo.dirstate.hasdir(b"a")
        # drop the cached directory structure so each run recomputes it
        del repo.dirstate._map._dirs
    timer(d)
    fm.end()
808 808
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark building the file-name case-folding map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # force the dirstate to be loaded before timing starts
    b'a' in dirstate
    def d():
        dirstate._map.filefoldmap.get(b'a')
        # drop the cached map so each run rebuilds it
        del dirstate._map.filefoldmap
    timer(d)
    fm.end()
820 820
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark building the directory case-folding map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # force the dirstate to be loaded before timing starts
    b'a' in dirstate
    def d():
        dirstate._map.dirfoldmap.get(b'a')
        # drop both the fold map and the dir structure it is derived from
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(d)
    fm.end()
833 833
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark writing the dirstate back to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # force the dirstate to be loaded before timing starts
    b"a" in ds
    def d():
        # mark dirty by hand so write() actually serializes every run
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()
845 845
@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    """benchmark the merge action calculation against REV"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()
864 864
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def trace():
        copies.pathcopies(ctx1, ctx2)
    timer(trace)
    fm.end()
876 876
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
          ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def d():
        phases = _phases
        if full:
            # with --full, also time re-reading the phaseroots file
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()
895 895
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                       {b'namespace': b'phases'}).result()
    # the peer is no longer needed once the phases have been fetched
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    # NOTE(review): iteritems() is py2-only; confirm py3 behaviour here
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        # count remote roots that exist locally and aren't public (phase 0)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
951 951
@command(b'perfmanifest',[
    (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
    (b'', b'clear-disk', False, b'clear on-disk caches too'),
] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset: use that changeset's manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full 40-character hex of a manifest node
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # prefer the modern getstorage() API; fall back to the
                # private _revlog attribute on older Mercurial versions
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        # drop in-memory (and, with --clear-disk, persisted) caches so
        # every run pays the full read-and-parse cost
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
987 987
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading a single changelog entry"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def readone():
        repo.changelog.read(node)

    timer(readone)
    fm.end()
998 998
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def prepare():
        # drop the cached ignore matcher so each run reloads it
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def load():
        dirstate._ignore

    timer(load, setup=prepare, title=b"load")
    fm.end()
1015 1015
@command(b'perfindex', [
    (b'', b'rev', b'', b'revision to be looked up (default tip)'),
] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`.

    It is not currently possible to check for lookup of a missing node."""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    # the flag's declared default is b'' (not None), so test truthiness:
    # with `opts[b'rev'] is None` the tip branch could never be taken
    if not opts[b'rev']:
        n = repo[b"tip"].node()
    else:
        rev = scmutil.revsingle(repo, opts[b'rev'])
        n = repo[rev].node()

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        # time both the index (re)construction and one node -> rev lookup
        cl = makecl(unfi)
        cl.rev(n)
    timer(d, setup=setup)
    fm.end()
1042 1050
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup time of a bare `hg version` invocation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def runone():
        if os.name == r'nt':
            # Windows: no /dev/null, and HGRCPATH is neutralized via the
            # environment instead of the command line
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
        else:
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))

    timer(runone)
    fm.end()
1056 1064
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark resolving the parents of a series of changelog nodes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodes = [repo.changelog.node(i) for i in _xrange(count)]

    def runone():
        for node in nodes:
            repo.changelog.parents(node)

    timer(runone)
    fm.end()
1073 1081
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the files list of a changeset"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo[rev].files()))
    fm.end()
1083 1091
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw files entry of a changelog revision"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    changelog = repo.changelog
    # index 3 of the parsed changelog entry is the files list
    timer(lambda: len(changelog.read(rev)[3]))
    fm.end()
1094 1102
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def runone():
        return len(repo.lookup(rev))

    timer(runone)
    fm.end()
1101 1109
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
          ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark applying a reproducible series of random edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    editcount = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: the edit sequence is deterministic across runs
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(editcount):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def applyedits():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(applyedits)
    fm.end()
1135 1143
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # bind once so the timed closure skips the attribute lookup
    revrange = scmutil.revrange

    def runone():
        return len(revrange(repo, specs))

    timer(runone)
    fm.end()
1143 1151
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark a node-to-rev lookup on an uncached changelog index"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    node = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")

    def lookuponce():
        cl.rev(node)
        # wipe caches so the next run performs a cold lookup again
        clearcaches(cl)

    timer(lookuponce)
    fm.end()
1157 1165
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
          ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running `hg log` (output is buffered and discarded)"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()

    def runlog():
        commands.log(ui, repo, rev=rev, date=b'', user=b'',
                     copies=opts.get(b'rename'))

    timer(runlog)
    ui.popbuffer()
    fm.end()
1171 1179
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def walkbackwards():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            # read changelog data (in addition to the index)
            repo[rev].branch()

    timer(walkbackwards)
    fm.end()
1186 1194
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
          ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    # makelogtemplater is None when running on a Mercurial too old to
    # provide it (the abort below says: 4.3 or later is required)
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render into a throwaway ui writing to /dev/null so the measurement
    # is not dominated by real terminal/pager output
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        # render every requested revision once per timing run
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1220 1228
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
         ])
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    # with --timing two extra columns (rename count, elapsed time) are shown
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # only merges are interesting: copy tracing runs between each merge
    # parent and the common ancestor heads of the two parents
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                # NOTE(review): this dict mixes b'...' keys with the native
                # string keys added below; that relies on bytes and str being
                # the same type (Python 2) — confirm before porting to py3.
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
1296 1304
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def runone():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(runone)
    fm.end()
1303 1311
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    timer(lambda: store.fncache._load())
    fm.end()
1313 1321
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark rewriting the fncache file inside a transaction"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    # hold the repo lock and an open transaction for the whole benchmark;
    # the transaction is closed only after all timed runs finish
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')
    def d():
        # force the dirty flag each run so write() does not short-circuit
        s.fncache._dirty = True
        s.fncache.write(tr)
    timer(d)
    tr.close()
    lock.release()
    fm.end()
1330 1338
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path stored in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    # load once up front; only the encoding pass is timed
    store.fncache._load()

    def encodeall():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encodeall)
    fm.end()
1342 1350
def _bdiffworker(q, blocks, xdiff, ready, done):
    """Worker loop used by the threaded mode of perfbdiff.

    Pulls (text1, text2) pairs from queue ``q`` and diffs them with the
    algorithm selected by ``xdiff``/``blocks`` until a ``None`` sentinel is
    received, which ends the current timing round.  The worker then waits
    on the ``ready`` condition until the coordinator wakes all workers for
    the next round, and exits once the ``done`` event is set.
    """
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()
1358 1366
def _manifestrevision(repo, mnode):
    """Return the stored revision data for manifest node ``mnode``.

    Prefers the modern ``getstorage('')`` manifestlog API and falls back
    to the private ``_revlog`` attribute on older Mercurial versions.
    """
    ml = repo.manifestlog
    if util.safehasattr(ml, b'getstorage'):
        return ml.getstorage(b'').revision(mnode)
    return ml._revlog.revision(mnode)
1368 1376
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        # --alldata implies operating on the changelog
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # no FILE argument in this mode: the first positional is REV
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    # all text pairs are materialized up front so only diffing is timed
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        # threaded mode: workers consume pairs from a shared queue; a None
        # sentinel per worker marks the end of a timing round (see
        # _bdiffworker for the other side of this handshake)
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # tell workers to exit and wake any that are waiting on `ready`
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        # --alldata implies operating on the changelog
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # no FILE argument in this mode: the first positional is REV
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    # all text pairs are materialized up front so only diffing is timed
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1535 1543
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # map of single-letter diff flags to the commands.diff keyword name
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # time `hg diff` once per whitespace-option combination
    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        # NOTE(review): this rebinds ``opts``, shadowing the formatter
        # options parsed above for the rest of the loop
        opts = dict((options[c], b'1') for c in diffopt)
        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()
        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()
1557 1565
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # the low 16 bits of the first 4-byte header word hold the revlog
    # version; bit 16 is the inline-data flag
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    # sample nodes at fixed positions to measure lookup cost across the index
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    # (callable, display title) pairs; each is timed independently below
    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1675 1683
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            # walk from tip down to the start revision instead
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in _xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1717 1725
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          # fixed: the help text was a copy-paste of --stoprev's
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
          ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    # each entry becomes (rev, [timing-from-run-1, timing-from-run-2, ...])
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # fixed: the 50th percentile was computed with a 70 multiplier
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
1825 1833
1826 1834 class _faketr(object):
1827 1835 def add(s, x, y, z=None):
1828 1836 return None
1829 1837
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    """Rewrite revisions [startrev, stoprev] of `orig` into a fresh copy,
    timing each ``addrawrevision`` call individually.

    Returns a list of (rev, timing) pairs, where each timing is what the
    ``timeone`` context manager recorded for that single revision.

    `source` selects how the revision content is fed to the new revlog (see
    ``_getrevisionseed``); `runidx` only decorates the progress topic.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            def updateprogress(pos):
                progress.update(pos)
            def completeprogress():
                progress.complete()
        else:
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # drop both the C index caches and the python-level ones so
                # every iteration starts from a cold state
                dest.index.clearcaches()
                dest.clearcaches()
            # only the addrawrevision call itself is timed; seed preparation
            # and cache clearing happen outside the timer
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
1866 1874
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) pair used to feed `rev` of `orig` into
    ``addrawrevision`` on a destination revlog.

    `source` picks the hint handed to the delta machinery:

    * ``full``: fulltext only, no precomputed delta
    * ``parent-1`` / ``parent-2``: delta against that parent
    * ``parent-smallest``: delta against whichever parent yields the
      smaller diff
    * ``storage``: the delta already stored in `orig`
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    text = None
    cachedelta = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to p1 when there is no second parent
        base = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(base), orig.revdiff(base, rev))
    elif source == b'parent-smallest':
        candidates = [(p1, orig.revdiff(p1, rev))]
        if p2 != nullid:
            candidates.append((p2, orig.revdiff(p2, rev)))
        # ties resolve to p1 because min() keeps the first minimum
        base, diff = min(candidates, key=lambda pair: len(pair[1]))
        cachedelta = (orig.rev(base), diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1905 1913
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Context manager yielding a writable copy of revlog `orig`, truncated
    so that every revision >= `truncaterev` is missing and can be re-added.

    The index and data files are copied into a temporary directory which is
    removed on exit. Inline revlogs are refused because index and data share
    one file there, so the simple per-file truncation below would corrupt it.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        # each index entry is fixed-size (orig._io.size bytes), so the byte
        # offset of entry `truncaterev` is a simple multiplication
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        # best-effort cleanup (ignore_errors=True)
        shutil.rmtree(tmpdir, True)
1952 1960
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # no explicit selection: keep every engine that is both available
        # and actually able to compress (probed with a dummy payload)
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # open a raw file handle on the revlog's backing file
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    # mutable cell shared with dochunkbatch/docompress: the compression
    # benchmarks reuse the decompressed chunks produced by the chunk batch
    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2070 2078
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # with -c/-m the positional argument is the revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # hoist attribute lookups out of the loop
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # skip the index entries interleaved with the data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved between modules across Mercurial versions
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute the inputs of each phase once, outside the timers
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2206 2214
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building the
    volatile revisions set cache on the revset execution. Volatile caches hold
    filtered and obsolescence related data."""
    # DOC FIX: the docstring previously said "--clean", but the option
    # registered above is `clear` (-C).
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def d():
        if clear:
            # drop hidden/obsolete volatile sets so they are rebuilt
            # inside the measured call
            repo.invalidatevolatilesets()
        if contexts:
            # iterate changectx objects (heavier than bare rev numbers)
            for ctx in repo.set(expr): pass
        else:
            for r in repo.revs(expr): pass
    timer(d)
    fm.end()
2229 2237
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def makebench(compute, name):
        """build a benchmark closure computing `compute(repo, name)` from a
        cold volatile-set state"""
        def bench():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            compute(repo, name)
        return bench

    # obsolescence-related sets first...
    obsnames = sorted(obsolete.cachefuncs)
    if names:
        obsnames = [n for n in obsnames if n in names]
    for name in obsnames:
        timer(makebench(obsolete.getrevs, name), title=name)

    # ...then the repoview filter sets
    filternames = sorted(repoview.filtertable)
    if names:
        filternames = [n for n in filternames if n in names]
    for name in filternames:
        timer(makebench(repoview.filterrevs, name), title=name)
    fm.end()
2271 2279
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
          ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap: the subset build is measured
                view._branchcaches.clear()
            else:
                # drop only this view's branchmap, keeping subsets warm
                view._branchcaches.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset is not itself pending, so filters end
        # up ordered from smallest subset to largest
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap I/O so only the in-memory update is timed
    branchcacheread = safeattrsetter(branchmap, b'read')
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcacheread.set(lambda repo: None)
    branchcachewrite.set(lambda bc, repo: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2340 2348
# NOTE(review): the docstring summary below reads "from for <base>" — the
# doubled word is pinned by the expected help output in
# tests/test-contrib-perf.t, so fixing it requires updating that test in the
# same change.
@command(b'perfbranchmapupdate', [
     (b'', b'base', [], b'subset of revision to start from'),
     (b'', b'target', [], b'subset of revision to end with'),
     (b'', b'clear-caches', False, b'clear cache between each runs')
] + formatteropts)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None] # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    # repoview filters hiding everything outside base (resp. target)
    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # temporarily register the two synthetic filters
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset was found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # work on a fresh copy so every run starts from the same state
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2444 2452
@command(b'perfbranchmapload', [
    (b'f', b'filter', b'', b'Specify repoview filter'),
    # FIX: help text previously read "brachmap"
    (b'', b'list', False, b'List branchmap filter caches'),
    (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),

] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # just enumerate the on-disk branchmap cache files and their sizes
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap() # make sure we have a relevant, up to date branchmap

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while branchmap.read(repo) is None:
        # walk up the subset chain until a cached branchmap is found
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        branchmap.read(repo)
    timer(bench, setup=setup)
    fm.end()
2489 2497
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)

    def d():
        # build a fresh obsstore each run; len() reports the marker count
        return len(obsolete.obsstore(svfs))

    timer(d)
    fm.end()
2499 2507
@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
     (b'', b'mincost', 0, b'smallest cost of items in cache'),
     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
     (b'', b'size', 4, b'size of cache'),
     (b'', b'gets', 10000, b'number of key lookups'),
     (b'', b'sets', 10000, b'number of key sets'),
     (b'', b'mixed', 10000, b'number of mixed mode operations'),
     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    """benchmark util.lrucachedict operations

    Times cache construction, gets, inserts/sets and a randomized mix of
    operations. When --costlimit is non-zero the cost-aware variants
    (``insert(..., cost=...)`` with a ``maxcost`` bound) are measured
    instead of the plain ones.
    """
    opts = _byteskwargs(opts)

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        # NB: `costs` is assigned below this definition; the closure only
        # dereferences it when the benchmark actually runs.
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        # (op, key, cost); keys span twice the cache size to force misses
        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2630 2638
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def write():
        # hoist the constant message; only the write call is of interest
        msg = b'Testing write performance\n'
        for _i in range(100000):
            ui.write(msg)

    timer(write)
    fm.end()
2643 2651
def uisetup(ui):
    """extension setup hook: patch cmdutil.openrevlog on old Mercurials.

    On versions where openrevlog exists but debugrevlogopts does not
    (see version reasoning below), the '--dir' option is silently ignored
    by openrevlog; wrap it so the unsupported option aborts loudly instead.
    """
    if (util.safehasattr(cmdutil, b'openrevlog') and
        not util.safehasattr(commands, b'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(b"This version doesn't support --dir option",
                                  hint=b"use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2658 2666
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        # walk the bar from 0 to `total`, one increment per step
        with ui.makeprogress(topic, total=total) as progress:
            for _step in pycompat.xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
@@ -1,300 +1,300
1 1 #require test-repo
2 2
3 3 Set vars:
4 4
5 5 $ . "$TESTDIR/helpers-testrepo.sh"
6 6 $ CONTRIBDIR="$TESTDIR/../contrib"
7 7
8 8 Prepare repo:
9 9
10 10 $ hg init
11 11
12 12 $ echo this is file a > a
13 13 $ hg add a
14 14 $ hg commit -m first
15 15
16 16 $ echo adding to file a >> a
17 17 $ hg commit -m second
18 18
19 19 $ echo adding more to file a >> a
20 20 $ hg commit -m third
21 21
22 22 $ hg up -r 0
23 23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 24 $ echo merge-this >> a
25 25 $ hg commit -m merge-able
26 26 created new head
27 27
28 28 $ hg up -r 2
29 29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30 30
31 31 perfstatus
32 32
33 33 $ cat >> $HGRCPATH << EOF
34 34 > [extensions]
35 35 > perfstatusext=$CONTRIBDIR/perf.py
36 36 > [perf]
37 37 > presleep=0
38 38 > stub=on
39 39 > parentscount=1
40 40 > EOF
41 41 $ hg help perfstatusext
42 42 perfstatusext extension - helper extension to measure performance
43 43
44 44 list of commands:
45 45
46 46 perfaddremove
47 47 (no help text available)
48 48 perfancestors
49 49 (no help text available)
50 50 perfancestorset
51 51 (no help text available)
52 52 perfannotate (no help text available)
53 53 perfbdiff benchmark a bdiff between revisions
54 54 perfbookmarks
55 55 benchmark parsing bookmarks from disk to memory
56 56 perfbranchmap
57 57 benchmark the update of a branchmap
58 58 perfbranchmapload
59 59 benchmark reading the branchmap
60 60 perfbranchmapupdate
61 61 benchmark branchmap update from for <base> revs to <target>
62 62 revs
63 63 perfbundleread
64 64 Benchmark reading of bundle files.
65 65 perfcca (no help text available)
66 66 perfchangegroupchangelog
67 67 Benchmark producing a changelog group for a changegroup.
68 68 perfchangeset
69 69 (no help text available)
70 70 perfctxfiles (no help text available)
71 71 perfdiffwd Profile diff of working directory changes
72 72 perfdirfoldmap
73 73 (no help text available)
74 74 perfdirs (no help text available)
75 75 perfdirstate (no help text available)
76 76 perfdirstatedirs
77 77 (no help text available)
78 78 perfdirstatefoldmap
79 79 (no help text available)
80 80 perfdirstatewrite
81 81 (no help text available)
82 82 perfdiscovery
83 83 benchmark discovery between local repo and the peer at given
84 84 path
85 85 perffncacheencode
86 86 (no help text available)
87 87 perffncacheload
88 88 (no help text available)
89 89 perffncachewrite
90 90 (no help text available)
91 91 perfheads benchmark the computation of a changelog heads
92 92 perfhelper-pathcopies
93 93 find statistic about potential parameters for the
94 94 'perftracecopies'
95 95 perfignore benchmark operation related to computing ignore
96 perfindex (no help text available)
96 perfindex benchmark index creation time followed by a lookup
97 97 perflinelogedits
98 98 (no help text available)
99 99 perfloadmarkers
100 100 benchmark the time to parse the on-disk markers for a repo
101 101 perflog (no help text available)
102 102 perflookup (no help text available)
103 103 perflrucachedict
104 104 (no help text available)
105 105 perfmanifest benchmark the time to read a manifest from disk and return a
106 106 usable
107 107 perfmergecalculate
108 108 (no help text available)
109 109 perfmoonwalk benchmark walking the changelog backwards
110 110 perfnodelookup
111 111 (no help text available)
112 112 perfparents (no help text available)
113 113 perfpathcopies
114 114 benchmark the copy tracing logic
115 115 perfphases benchmark phasesets computation
116 116 perfphasesremote
117 117 benchmark time needed to analyse phases of the remote server
118 118 perfprogress printing of progress bars
119 119 perfrawfiles (no help text available)
120 120 perfrevlogchunks
121 121 Benchmark operations on revlog chunks.
122 122 perfrevlogindex
123 123 Benchmark operations against a revlog index.
124 124 perfrevlogrevision
125 125 Benchmark obtaining a revlog revision.
126 126 perfrevlogrevisions
127 127 Benchmark reading a series of revisions from a revlog.
128 128 perfrevlogwrite
129 129 Benchmark writing a series of revisions to a revlog.
130 130 perfrevrange (no help text available)
131 131 perfrevset benchmark the execution time of a revset
132 132 perfstartup (no help text available)
133 133 perfstatus (no help text available)
134 134 perftags (no help text available)
135 135 perftemplating
136 136 test the rendering time of a given template
137 137 perfunidiff benchmark a unified diff between revisions
138 138 perfvolatilesets
139 139 benchmark the computation of various volatile set
140 140 perfwalk (no help text available)
141 141 perfwrite microbenchmark ui.write
142 142
143 143 (use 'hg help -v perfstatusext' to show built-in aliases and global options)
144 144 $ hg perfaddremove
145 145 $ hg perfancestors
146 146 $ hg perfancestorset 2
147 147 $ hg perfannotate a
148 148 $ hg perfbdiff -c 1
149 149 $ hg perfbdiff --alldata 1
150 150 $ hg perfunidiff -c 1
151 151 $ hg perfunidiff --alldata 1
152 152 $ hg perfbookmarks
153 153 $ hg perfbranchmap
154 154 $ hg perfbranchmapload
155 155 $ hg perfbranchmapupdate --base "not tip" --target "tip"
156 156 benchmark of branchmap with 3 revisions with 1 new ones
157 157 $ hg perfcca
158 158 $ hg perfchangegroupchangelog
159 159 $ hg perfchangegroupchangelog --cgversion 01
160 160 $ hg perfchangeset 2
161 161 $ hg perfctxfiles 2
162 162 $ hg perfdiffwd
163 163 $ hg perfdirfoldmap
164 164 $ hg perfdirs
165 165 $ hg perfdirstate
166 166 $ hg perfdirstatedirs
167 167 $ hg perfdirstatefoldmap
168 168 $ hg perfdirstatewrite
169 169 #if repofncache
170 170 $ hg perffncacheencode
171 171 $ hg perffncacheload
172 172 $ hg debugrebuildfncache
173 173 fncache already up to date
174 174 $ hg perffncachewrite
175 175 $ hg debugrebuildfncache
176 176 fncache already up to date
177 177 #endif
178 178 $ hg perfheads
179 179 $ hg perfignore
180 180 $ hg perfindex
181 181 $ hg perflinelogedits -n 1
182 182 $ hg perfloadmarkers
183 183 $ hg perflog
184 184 $ hg perflookup 2
185 185 $ hg perflrucache
186 186 $ hg perfmanifest 2
187 187 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
188 188 $ hg perfmanifest -m 44fe2c8352bb
189 189 abort: manifest revision must be integer or full node
190 190 [255]
191 191 $ hg perfmergecalculate -r 3
192 192 $ hg perfmoonwalk
193 193 $ hg perfnodelookup 2
194 194 $ hg perfpathcopies 1 2
195 195 $ hg perfprogress --total 1000
196 196 $ hg perfrawfiles 2
197 197 $ hg perfrevlogindex -c
198 198 #if reporevlogstore
199 199 $ hg perfrevlogrevisions .hg/store/data/a.i
200 200 #endif
201 201 $ hg perfrevlogrevision -m 0
202 202 $ hg perfrevlogchunks -c
203 203 $ hg perfrevrange
204 204 $ hg perfrevset 'all()'
205 205 $ hg perfstartup
206 206 $ hg perfstatus
207 207 $ hg perftags
208 208 $ hg perftemplating
209 209 $ hg perfvolatilesets
210 210 $ hg perfwalk
211 211 $ hg perfparents
212 212 $ hg perfdiscovery -q .
213 213
214 214 test actual output
215 215 ------------------
216 216
217 217 normal output:
218 218
219 219 $ hg perfheads --config perf.stub=no
220 220 ! wall * comb * user * sys * (best of *) (glob)
221 221
222 222 detailed output:
223 223
224 224 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
225 225 ! wall * comb * user * sys * (best of *) (glob)
226 226 ! wall * comb * user * sys * (max of *) (glob)
227 227 ! wall * comb * user * sys * (avg of *) (glob)
228 228 ! wall * comb * user * sys * (median of *) (glob)
229 229
230 230 test json output
231 231 ----------------
232 232
233 233 normal output:
234 234
235 235 $ hg perfheads --template json --config perf.stub=no
236 236 [
237 237 {
238 238 "comb": *, (glob)
239 239 "count": *, (glob)
240 240 "sys": *, (glob)
241 241 "user": *, (glob)
242 242 "wall": * (glob)
243 243 }
244 244 ]
245 245
246 246 detailed output:
247 247
248 248 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
249 249 [
250 250 {
251 251 "avg.comb": *, (glob)
252 252 "avg.count": *, (glob)
253 253 "avg.sys": *, (glob)
254 254 "avg.user": *, (glob)
255 255 "avg.wall": *, (glob)
256 256 "comb": *, (glob)
257 257 "count": *, (glob)
258 258 "max.comb": *, (glob)
259 259 "max.count": *, (glob)
260 260 "max.sys": *, (glob)
261 261 "max.user": *, (glob)
262 262 "max.wall": *, (glob)
263 263 "median.comb": *, (glob)
264 264 "median.count": *, (glob)
265 265 "median.sys": *, (glob)
266 266 "median.user": *, (glob)
267 267 "median.wall": *, (glob)
268 268 "sys": *, (glob)
269 269 "user": *, (glob)
270 270 "wall": * (glob)
271 271 }
272 272 ]
273 273
274 274 Check perf.py for historical portability
275 275 ----------------------------------------
276 276
277 277 $ cd "$TESTDIR/.."
278 278
279 279 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
280 280 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
281 281 > "$TESTDIR"/check-perf-code.py contrib/perf.py
282 282 contrib/perf.py:\d+: (re)
283 283 > from mercurial import (
284 284 import newer module separately in try clause for early Mercurial
285 285 contrib/perf.py:\d+: (re)
286 286 > from mercurial import (
287 287 import newer module separately in try clause for early Mercurial
288 288 contrib/perf.py:\d+: (re)
289 289 > origindexpath = orig.opener.join(orig.indexfile)
290 290 use getvfs()/getsvfs() for early Mercurial
291 291 contrib/perf.py:\d+: (re)
292 292 > origdatapath = orig.opener.join(orig.datafile)
293 293 use getvfs()/getsvfs() for early Mercurial
294 294 contrib/perf.py:\d+: (re)
295 295 > vfs = vfsmod.vfs(tmpdir)
296 296 use getvfs()/getsvfs() for early Mercurial
297 297 contrib/perf.py:\d+: (re)
298 298 > vfs.options = getattr(orig.opener, 'options', None)
299 299 use getvfs()/getsvfs() for early Mercurial
300 300 [1]
General Comments 0
You need to be logged in to leave comments. Login now