perf: add a new `perfhelper-tracecopies` command...
Boris Feld
r40727:a65fe13d default
@@ -1,2372 +1,2420
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance'''
3 3
4 4 # "historical portability" policy of perf.py:
5 5 #
6 6 # We have to do:
7 7 # - make perf.py "loadable" with as wide Mercurial version as possible
8 8 # This doesn't mean that perf commands work correctly with that Mercurial.
9 9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
10 10 # - make historical perf command work correctly with as wide Mercurial
11 11 # version as possible
12 12 #
13 13 # We have to do, if possible with reasonable cost:
14 14 # - make recent perf command for historical feature work correctly
15 15 # with early Mercurial
16 16 #
17 17 # We don't have to do:
18 18 # - make perf command for recent feature work correctly with early
19 19 # Mercurial
20 20
21 21 from __future__ import absolute_import
22 22 import contextlib
23 23 import functools
24 24 import gc
25 25 import os
26 26 import random
27 27 import shutil
28 28 import struct
29 29 import sys
30 30 import tempfile
31 31 import threading
32 32 import time
33 33 from mercurial import (
34 34 changegroup,
35 35 cmdutil,
36 36 commands,
37 37 copies,
38 38 error,
39 39 extensions,
40 40 mdiff,
41 41 merge,
42 42 revlog,
43 43 util,
44 44 )
45 45
46 46 # for "historical portability":
47 47 # try to import modules separately (in dict order), and ignore
48 48 # failure, because these aren't available with early Mercurial
49 49 try:
50 50 from mercurial import branchmap # since 2.5 (or bcee63733aad)
51 51 except ImportError:
52 52 pass
53 53 try:
54 54 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
55 55 except ImportError:
56 56 pass
57 57 try:
58 58 from mercurial import registrar # since 3.7 (or 37d50250b696)
59 59 dir(registrar) # forcibly load it
60 60 except ImportError:
61 61 registrar = None
62 62 try:
63 63 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
64 64 except ImportError:
65 65 pass
66 66 try:
67 67 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
68 68 except ImportError:
69 69 pass
70 70
71 71 def identity(a):
72 72 return a
73 73
74 74 try:
75 75 from mercurial import pycompat
76 76 getargspec = pycompat.getargspec # added to module after 4.5
77 77 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
78 78 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
79 79 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
80 80 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
81 81 if pycompat.ispy3:
82 82 _maxint = sys.maxsize # per py3 docs for replacing maxint
83 83 else:
84 84 _maxint = sys.maxint
85 85 except (ImportError, AttributeError):
86 86 import inspect
87 87 getargspec = inspect.getargspec
88 88 _byteskwargs = identity
89 89 fsencode = identity # no py3 support
90 90 _maxint = sys.maxint # no py3 support
91 91 _sysstr = lambda x: x # no py3 support
92 92 _xrange = xrange
93 93
94 94 try:
95 95 # 4.7+
96 96 queue = pycompat.queue.Queue
97 97 except (AttributeError, ImportError):
98 98 # <4.7.
99 99 try:
100 100 queue = pycompat.queue
101 101 except (AttributeError, ImportError):
102 102 queue = util.queue
103 103
104 104 try:
105 105 from mercurial import logcmdutil
106 106 makelogtemplater = logcmdutil.maketemplater
107 107 except (AttributeError, ImportError):
108 108 try:
109 109 makelogtemplater = cmdutil.makelogtemplater
110 110 except (AttributeError, ImportError):
111 111 makelogtemplater = None
112 112
113 113 # for "historical portability":
114 114 # define util.safehasattr forcibly, because util.safehasattr has been
115 115 # available since 1.9.3 (or 94b200a11cf7)
116 116 _undefined = object()
117 117 def safehasattr(thing, attr):
118 118 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
119 119 setattr(util, 'safehasattr', safehasattr)
120 120
121 121 # for "historical portability":
122 122 # define util.timer forcibly, because util.timer has been available
123 123 # since ae5d60bb70c9
124 124 if safehasattr(time, 'perf_counter'):
125 125 util.timer = time.perf_counter
126 126 elif os.name == b'nt':
127 127 util.timer = time.clock
128 128 else:
129 129 util.timer = time.time
130 130
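# A standalone sketch of the same fallback chain, assuming only the standard
# library: prefer time.perf_counter (monotonic, high resolution); on old
# Pythons without it, fall back to time.clock on Windows and time.time
# elsewhere.
import os
import time

if hasattr(time, 'perf_counter'):
    _clock = time.perf_counter
elif os.name == 'nt':
    _clock = time.clock
else:
    _clock = time.time

def _elapsed(func):
    start = _clock()
    func()
    return _clock() - start

# _elapsed(lambda: sum(range(10**6)))
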
131 131 # for "historical portability":
132 132 # use locally defined empty option list, if formatteropts isn't
133 133 # available, because commands.formatteropts has been available since
134 134 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
135 135 # available since 2.2 (or ae5f92e154d3)
136 136 formatteropts = getattr(cmdutil, "formatteropts",
137 137 getattr(commands, "formatteropts", []))
138 138
139 139 # for "historical portability":
140 140 # use locally defined option list, if debugrevlogopts isn't available,
141 141 # because commands.debugrevlogopts has been available since 3.7 (or
142 142 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
143 143 # since 1.9 (or a79fea6b3e77).
144 144 revlogopts = getattr(cmdutil, "debugrevlogopts",
145 145 getattr(commands, "debugrevlogopts", [
146 146 (b'c', b'changelog', False, (b'open changelog')),
147 147 (b'm', b'manifest', False, (b'open manifest')),
148 148 (b'', b'dir', False, (b'open directory manifest')),
149 149 ]))
150 150
151 151 cmdtable = {}
152 152
153 153 # for "historical portability":
154 154 # define parsealiases locally, because cmdutil.parsealiases has been
155 155 # available since 1.5 (or 6252852b4332)
156 156 def parsealiases(cmd):
157 157 return cmd.split(b"|")
158 158
159 159 if safehasattr(registrar, 'command'):
160 160 command = registrar.command(cmdtable)
161 161 elif safehasattr(cmdutil, 'command'):
162 162 command = cmdutil.command(cmdtable)
163 163 if b'norepo' not in getargspec(command).args:
164 164 # for "historical portability":
165 165 # wrap original cmdutil.command, because "norepo" option has
166 166 # been available since 3.1 (or 75a96326cecb)
167 167 _command = command
168 168 def command(name, options=(), synopsis=None, norepo=False):
169 169 if norepo:
170 170 commands.norepo += b' %s' % b' '.join(parsealiases(name))
171 171 return _command(name, list(options), synopsis)
172 172 else:
173 173 # for "historical portability":
174 174 # define "@command" annotation locally, because cmdutil.command
175 175 # has been available since 1.9 (or 2daa5179e73f)
176 176 def command(name, options=(), synopsis=None, norepo=False):
177 177 def decorator(func):
178 178 if synopsis:
179 179 cmdtable[name] = func, list(options), synopsis
180 180 else:
181 181 cmdtable[name] = func, list(options)
182 182 if norepo:
183 183 commands.norepo += b' %s' % b' '.join(parsealiases(name))
184 184 return func
185 185 return decorator
186 186
187 187 try:
188 188 import mercurial.registrar
189 189 import mercurial.configitems
190 190 configtable = {}
191 191 configitem = mercurial.registrar.configitem(configtable)
192 192 configitem(b'perf', b'presleep',
193 193 default=mercurial.configitems.dynamicdefault,
194 194 )
195 195 configitem(b'perf', b'stub',
196 196 default=mercurial.configitems.dynamicdefault,
197 197 )
198 198 configitem(b'perf', b'parentscount',
199 199 default=mercurial.configitems.dynamicdefault,
200 200 )
201 201 configitem(b'perf', b'all-timing',
202 202 default=mercurial.configitems.dynamicdefault,
203 203 )
204 204 except (ImportError, AttributeError):
205 205 pass
206 206
207 207 def getlen(ui):
208 208 if ui.configbool(b"perf", b"stub", False):
209 209 return lambda x: 1
210 210 return len
211 211
212 212 def gettimer(ui, opts=None):
213 213 """return a timer function and formatter: (timer, formatter)
214 214
215 215 This function exists to gather the creation of formatter in a single
216 216 place instead of duplicating it in all performance commands."""
217 217
218 218 # enforce an idle period before execution to counteract power management
219 219 # experimental config: perf.presleep
220 220 time.sleep(getint(ui, b"perf", b"presleep", 1))
221 221
222 222 if opts is None:
223 223 opts = {}
224 224 # redirect all to stderr unless buffer api is in use
225 225 if not ui._buffers:
226 226 ui = ui.copy()
227 227 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
228 228 if uifout:
229 229 # for "historical portability":
230 230 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
231 231 uifout.set(ui.ferr)
232 232
233 233 # get a formatter
234 234 uiformatter = getattr(ui, 'formatter', None)
235 235 if uiformatter:
236 236 fm = uiformatter(b'perf', opts)
237 237 else:
238 238 # for "historical portability":
239 239 # define formatter locally, because ui.formatter has been
240 240 # available since 2.2 (or ae5f92e154d3)
241 241 from mercurial import node
242 242 class defaultformatter(object):
243 243 """Minimized composition of baseformatter and plainformatter
244 244 """
245 245 def __init__(self, ui, topic, opts):
246 246 self._ui = ui
247 247 if ui.debugflag:
248 248 self.hexfunc = node.hex
249 249 else:
250 250 self.hexfunc = node.short
251 251 def __nonzero__(self):
252 252 return False
253 253 __bool__ = __nonzero__
254 254 def startitem(self):
255 255 pass
256 256 def data(self, **data):
257 257 pass
258 258 def write(self, fields, deftext, *fielddata, **opts):
259 259 self._ui.write(deftext % fielddata, **opts)
260 260 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
261 261 if cond:
262 262 self._ui.write(deftext % fielddata, **opts)
263 263 def plain(self, text, **opts):
264 264 self._ui.write(text, **opts)
265 265 def end(self):
266 266 pass
267 267 fm = defaultformatter(ui, b'perf', opts)
268 268
269 269 # stub function, runs code only once instead of in a loop
270 270 # experimental config: perf.stub
271 271 if ui.configbool(b"perf", b"stub", False):
272 272 return functools.partial(stub_timer, fm), fm
273 273
274 274 # experimental config: perf.all-timing
275 275 displayall = ui.configbool(b"perf", b"all-timing", False)
276 276 return functools.partial(_timer, fm, displayall=displayall), fm
277 277
278 278 def stub_timer(fm, func, setup=None, title=None):
279 279 func()
280 280
281 281 @contextlib.contextmanager
282 282 def timeone():
283 283 r = []
284 284 ostart = os.times()
285 285 cstart = util.timer()
286 286 yield r
287 287 cstop = util.timer()
288 288 ostop = os.times()
289 289 a, b = ostart, ostop
290 290 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
291 291
292 292 def _timer(fm, func, setup=None, title=None, displayall=False):
293 293 gc.collect()
294 294 results = []
295 295 begin = util.timer()
296 296 count = 0
297 297 while True:
298 298 if setup is not None:
299 299 setup()
300 300 with timeone() as item:
301 301 r = func()
302 302 count += 1
303 303 results.append(item[0])
304 304 cstop = util.timer()
305 305 if cstop - begin > 3 and count >= 100:
306 306 break
307 307 if cstop - begin > 10 and count >= 3:
308 308 break
309 309
310 310 formatone(fm, results, title=title, result=r,
311 311 displayall=displayall)
312 312
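# A standalone sketch of the stop rule used by _timer above, assuming only
# the standard library: keep re-running the function until more than 3
# seconds have passed and at least 100 samples were taken, or more than 10
# seconds and at least 3 samples, then report the best wall time.
import time

def adaptivetimer(func):
    samples = []
    begin = time.perf_counter()
    count = 0
    while True:
        start = time.perf_counter()
        func()
        now = time.perf_counter()
        samples.append(now - start)
        count += 1
        if now - begin > 3 and count >= 100:
            break
        if now - begin > 10 and count >= 3:
            break
    return min(samples), count

# adaptivetimer(lambda: sum(range(10**5)))
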
313 313 def formatone(fm, timings, title=None, result=None, displayall=False):
314 314
315 315 count = len(timings)
316 316
317 317 fm.startitem()
318 318
319 319 if title:
320 320 fm.write(b'title', b'! %s\n', title)
321 321 if result:
322 322 fm.write(b'result', b'! result: %s\n', result)
323 323 def display(role, entry):
324 324 prefix = b''
325 325 if role != b'best':
326 326 prefix = b'%s.' % role
327 327 fm.plain(b'!')
328 328 fm.write(prefix + b'wall', b' wall %f', entry[0])
329 329 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
330 330 fm.write(prefix + b'user', b' user %f', entry[1])
331 331 fm.write(prefix + b'sys', b' sys %f', entry[2])
332 332 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
333 333 fm.plain(b'\n')
334 334 timings.sort()
335 335 min_val = timings[0]
336 336 display(b'best', min_val)
337 337 if displayall:
338 338 max_val = timings[-1]
339 339 display(b'max', max_val)
340 340 avg = tuple([sum(x) / count for x in zip(*timings)])
341 341 display(b'avg', avg)
342 342 median = timings[len(timings) // 2]
343 343 display(b'median', median)
344 344
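# A standalone sketch of the statistics reported above, assuming `timings`
# is a list of (wall, user, sys) tuples as produced by timeone().
def summarizetimings(timings):
    timings = sorted(timings)
    count = len(timings)
    avg = tuple(sum(col) / count for col in zip(*timings))
    return {
        'best': timings[0],
        'max': timings[-1],
        'avg': avg,
        'median': timings[count // 2],
    }

# summarizetimings([(0.12, 0.10, 0.01), (0.15, 0.11, 0.02), (0.11, 0.09, 0.01)])
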
345 345 # utilities for historical portability
346 346
347 347 def getint(ui, section, name, default):
348 348 # for "historical portability":
349 349 # ui.configint has been available since 1.9 (or fa2b596db182)
350 350 v = ui.config(section, name, None)
351 351 if v is None:
352 352 return default
353 353 try:
354 354 return int(v)
355 355 except ValueError:
356 356 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
357 357 % (section, name, v))
358 358
359 359 def safeattrsetter(obj, name, ignoremissing=False):
360 360 """Ensure that 'obj' has 'name' attribute before subsequent setattr
361 361
362 362 This function aborts if 'obj' doesn't have the 'name' attribute
363 363 at runtime. This avoids overlooking a future removal of the
364 364 attribute, which would silently break the performance measurement.
365 365
366 366 This function returns an object that can (1) assign a new value to
367 367 the attribute and (2) restore the attribute's original value.
368 368
369 369 If 'ignoremissing' is true, a missing 'name' attribute doesn't cause
370 370 an abort, and this function returns None. This is useful for
371 371 examining an attribute that isn't guaranteed to exist in all
372 372 Mercurial versions.
373 373 """
374 374 if not util.safehasattr(obj, name):
375 375 if ignoremissing:
376 376 return None
377 377 raise error.Abort((b"missing attribute %s of %s might break assumption"
378 378 b" of performance measurement") % (name, obj))
379 379
380 380 origvalue = getattr(obj, _sysstr(name))
381 381 class attrutil(object):
382 382 def set(self, newvalue):
383 383 setattr(obj, _sysstr(name), newvalue)
384 384 def restore(self):
385 385 setattr(obj, _sysstr(name), origvalue)
386 386
387 387 return attrutil()
388 388
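# A hedged usage sketch for safeattrsetter, using a stand-in object rather
# than a real Mercurial object: temporarily replace an attribute, then put
# the original value back.
def _safeattrsetterexample():
    class dummyui(object):
        fout = 'real-output'
    target = dummyui()
    setter = safeattrsetter(target, b'fout')
    setter.set('stubbed-output')    # run code whose output should be dropped
    assert target.fout == 'stubbed-output'
    setter.restore()
    assert target.fout == 'real-output'
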
389 389 # utilities to examine internal API changes
390 390
391 391 def getbranchmapsubsettable():
392 392 # for "historical portability":
393 393 # subsettable is defined in:
394 394 # - branchmap since 2.9 (or 175c6fd8cacc)
395 395 # - repoview since 2.5 (or 59a9f18d4587)
396 396 for mod in (branchmap, repoview):
397 397 subsettable = getattr(mod, 'subsettable', None)
398 398 if subsettable:
399 399 return subsettable
400 400
401 401 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
402 402 # branchmap and repoview modules exist, but subsettable attribute
403 403 # doesn't)
404 404 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
405 405 hint=b"use 2.5 or later")
406 406
407 407 def getsvfs(repo):
408 408 """Return appropriate object to access files under .hg/store
409 409 """
410 410 # for "historical portability":
411 411 # repo.svfs has been available since 2.3 (or 7034365089bf)
412 412 svfs = getattr(repo, 'svfs', None)
413 413 if svfs:
414 414 return svfs
415 415 else:
416 416 return getattr(repo, 'sopener')
417 417
418 418 def getvfs(repo):
419 419 """Return appropriate object to access files under .hg
420 420 """
421 421 # for "historical portability":
422 422 # repo.vfs has been available since 2.3 (or 7034365089bf)
423 423 vfs = getattr(repo, 'vfs', None)
424 424 if vfs:
425 425 return vfs
426 426 else:
427 427 return getattr(repo, 'opener')
428 428
429 429 def repocleartagscachefunc(repo):
430 430 """Return the function to clear tags cache according to repo internal API
431 431 """
432 432 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
433 433 # in this case, setattr(repo, '_tagscache', None) or so isn't
434 434 # correct way to clear tags cache, because existing code paths
435 435 # expect _tagscache to be a structured object.
436 436 def clearcache():
437 437 # _tagscache has been filteredpropertycache since 2.5 (or
438 438 # 98c867ac1330), and delattr() can't work in such case
439 439 if b'_tagscache' in vars(repo):
440 440 del repo.__dict__[b'_tagscache']
441 441 return clearcache
442 442
443 443 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
444 444 if repotags: # since 1.4 (or 5614a628d173)
445 445 return lambda : repotags.set(None)
446 446
447 447 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
448 448 if repotagscache: # since 0.6 (or d7df759d0e97)
449 449 return lambda : repotagscache.set(None)
450 450
451 451 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
452 452 # this point, but it isn't so problematic, because:
453 453 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
454 454 # in perftags() causes failure soon
455 455 # - perf.py itself has been available since 1.1 (or eb240755386d)
456 456 raise error.Abort((b"tags API of this hg command is unknown"))
457 457
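# An analogous standalone sketch, assuming Python 3.8+ for
# functools.cached_property: the cached value lives in the instance
# __dict__ under the attribute name, so deleting that dict entry (the same
# trick clearcache() uses above for _tagscache) forces recomputation.
import functools

class _tagsowner(object):
    computed = 0

    @functools.cached_property
    def tags(self):
        type(self).computed += 1
        return {'tip': '123abc'}

def _clearcacheexample():
    owner = _tagsowner()
    owner.tags                       # first access fills the cache
    if 'tags' in vars(owner):
        del owner.__dict__['tags']   # drop the cached value
    owner.tags                       # recomputed on next access
    assert _tagsowner.computed == 2
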
458 458 # utilities to clear cache
459 459
460 460 def clearfilecache(obj, attrname):
461 461 unfiltered = getattr(obj, 'unfiltered', None)
462 462 if unfiltered is not None:
463 463 obj = obj.unfiltered()
464 464 if attrname in vars(obj):
465 465 delattr(obj, attrname)
466 466 obj._filecache.pop(attrname, None)
467 467
468 468 # perf commands
469 469
470 470 @command(b'perfwalk', formatteropts)
471 471 def perfwalk(ui, repo, *pats, **opts):
472 472 opts = _byteskwargs(opts)
473 473 timer, fm = gettimer(ui, opts)
474 474 m = scmutil.match(repo[None], pats, {})
475 475 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
476 476 ignored=False))))
477 477 fm.end()
478 478
479 479 @command(b'perfannotate', formatteropts)
480 480 def perfannotate(ui, repo, f, **opts):
481 481 opts = _byteskwargs(opts)
482 482 timer, fm = gettimer(ui, opts)
483 483 fc = repo[b'.'][f]
484 484 timer(lambda: len(fc.annotate(True)))
485 485 fm.end()
486 486
487 487 @command(b'perfstatus',
488 488 [(b'u', b'unknown', False,
489 489 b'ask status to look for unknown files')] + formatteropts)
490 490 def perfstatus(ui, repo, **opts):
491 491 opts = _byteskwargs(opts)
492 492 #m = match.always(repo.root, repo.getcwd())
493 493 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
494 494 # False))))
495 495 timer, fm = gettimer(ui, opts)
496 496 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
497 497 fm.end()
498 498
499 499 @command(b'perfaddremove', formatteropts)
500 500 def perfaddremove(ui, repo, **opts):
501 501 opts = _byteskwargs(opts)
502 502 timer, fm = gettimer(ui, opts)
503 503 try:
504 504 oldquiet = repo.ui.quiet
505 505 repo.ui.quiet = True
506 506 matcher = scmutil.match(repo[None])
507 507 opts[b'dry_run'] = True
508 508 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
509 509 finally:
510 510 repo.ui.quiet = oldquiet
511 511 fm.end()
512 512
513 513 def clearcaches(cl):
514 514 # behave somewhat consistently across internal API changes
515 515 if util.safehasattr(cl, b'clearcaches'):
516 516 cl.clearcaches()
517 517 elif util.safehasattr(cl, b'_nodecache'):
518 518 from mercurial.node import nullid, nullrev
519 519 cl._nodecache = {nullid: nullrev}
520 520 cl._nodepos = None
521 521
522 522 @command(b'perfheads', formatteropts)
523 523 def perfheads(ui, repo, **opts):
524 524 opts = _byteskwargs(opts)
525 525 timer, fm = gettimer(ui, opts)
526 526 cl = repo.changelog
527 527 def d():
528 528 len(cl.headrevs())
529 529 clearcaches(cl)
530 530 timer(d)
531 531 fm.end()
532 532
533 533 @command(b'perftags', formatteropts)
534 534 def perftags(ui, repo, **opts):
535 535 import mercurial.changelog
536 536 import mercurial.manifest
537 537
538 538 opts = _byteskwargs(opts)
539 539 timer, fm = gettimer(ui, opts)
540 540 svfs = getsvfs(repo)
541 541 repocleartagscache = repocleartagscachefunc(repo)
542 542 def s():
543 543 repo.changelog = mercurial.changelog.changelog(svfs)
544 544 rootmanifest = mercurial.manifest.manifestrevlog(svfs)
545 545 repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo,
546 546 rootmanifest)
547 547 repocleartagscache()
548 548 def t():
549 549 return len(repo.tags())
550 550 timer(t, setup=s)
551 551 fm.end()
552 552
553 553 @command(b'perfancestors', formatteropts)
554 554 def perfancestors(ui, repo, **opts):
555 555 opts = _byteskwargs(opts)
556 556 timer, fm = gettimer(ui, opts)
557 557 heads = repo.changelog.headrevs()
558 558 def d():
559 559 for a in repo.changelog.ancestors(heads):
560 560 pass
561 561 timer(d)
562 562 fm.end()
563 563
564 564 @command(b'perfancestorset', formatteropts)
565 565 def perfancestorset(ui, repo, revset, **opts):
566 566 opts = _byteskwargs(opts)
567 567 timer, fm = gettimer(ui, opts)
568 568 revs = repo.revs(revset)
569 569 heads = repo.changelog.headrevs()
570 570 def d():
571 571 s = repo.changelog.ancestors(heads)
572 572 for rev in revs:
573 573 rev in s
574 574 timer(d)
575 575 fm.end()
576 576
577 577 @command(b'perfbookmarks', formatteropts)
578 578 def perfbookmarks(ui, repo, **opts):
579 579 """benchmark parsing bookmarks from disk to memory"""
580 580 opts = _byteskwargs(opts)
581 581 timer, fm = gettimer(ui, opts)
582 582
583 583 def s():
584 584 clearfilecache(repo, b'_bookmarks')
585 585 def d():
586 586 repo._bookmarks
587 587 timer(d, setup=s)
588 588 fm.end()
589 589
590 590 @command(b'perfbundleread', formatteropts, b'BUNDLE')
591 591 def perfbundleread(ui, repo, bundlepath, **opts):
592 592 """Benchmark reading of bundle files.
593 593
594 594 This command is meant to isolate the I/O part of bundle reading as
595 595 much as possible.
596 596 """
597 597 from mercurial import (
598 598 bundle2,
599 599 exchange,
600 600 streamclone,
601 601 )
602 602
603 603 opts = _byteskwargs(opts)
604 604
605 605 def makebench(fn):
606 606 def run():
607 607 with open(bundlepath, b'rb') as fh:
608 608 bundle = exchange.readbundle(ui, fh, bundlepath)
609 609 fn(bundle)
610 610
611 611 return run
612 612
613 613 def makereadnbytes(size):
614 614 def run():
615 615 with open(bundlepath, b'rb') as fh:
616 616 bundle = exchange.readbundle(ui, fh, bundlepath)
617 617 while bundle.read(size):
618 618 pass
619 619
620 620 return run
621 621
622 622 def makestdioread(size):
623 623 def run():
624 624 with open(bundlepath, b'rb') as fh:
625 625 while fh.read(size):
626 626 pass
627 627
628 628 return run
629 629
630 630 # bundle1
631 631
632 632 def deltaiter(bundle):
633 633 for delta in bundle.deltaiter():
634 634 pass
635 635
636 636 def iterchunks(bundle):
637 637 for chunk in bundle.getchunks():
638 638 pass
639 639
640 640 # bundle2
641 641
642 642 def forwardchunks(bundle):
643 643 for chunk in bundle._forwardchunks():
644 644 pass
645 645
646 646 def iterparts(bundle):
647 647 for part in bundle.iterparts():
648 648 pass
649 649
650 650 def iterpartsseekable(bundle):
651 651 for part in bundle.iterparts(seekable=True):
652 652 pass
653 653
654 654 def seek(bundle):
655 655 for part in bundle.iterparts(seekable=True):
656 656 part.seek(0, os.SEEK_END)
657 657
658 658 def makepartreadnbytes(size):
659 659 def run():
660 660 with open(bundlepath, b'rb') as fh:
661 661 bundle = exchange.readbundle(ui, fh, bundlepath)
662 662 for part in bundle.iterparts():
663 663 while part.read(size):
664 664 pass
665 665
666 666 return run
667 667
668 668 benches = [
669 669 (makestdioread(8192), b'read(8k)'),
670 670 (makestdioread(16384), b'read(16k)'),
671 671 (makestdioread(32768), b'read(32k)'),
672 672 (makestdioread(131072), b'read(128k)'),
673 673 ]
674 674
675 675 with open(bundlepath, b'rb') as fh:
676 676 bundle = exchange.readbundle(ui, fh, bundlepath)
677 677
678 678 if isinstance(bundle, changegroup.cg1unpacker):
679 679 benches.extend([
680 680 (makebench(deltaiter), b'cg1 deltaiter()'),
681 681 (makebench(iterchunks), b'cg1 getchunks()'),
682 682 (makereadnbytes(8192), b'cg1 read(8k)'),
683 683 (makereadnbytes(16384), b'cg1 read(16k)'),
684 684 (makereadnbytes(32768), b'cg1 read(32k)'),
685 685 (makereadnbytes(131072), b'cg1 read(128k)'),
686 686 ])
687 687 elif isinstance(bundle, bundle2.unbundle20):
688 688 benches.extend([
689 689 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
690 690 (makebench(iterparts), b'bundle2 iterparts()'),
691 691 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
692 692 (makebench(seek), b'bundle2 part seek()'),
693 693 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
694 694 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
695 695 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
696 696 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
697 697 ])
698 698 elif isinstance(bundle, streamclone.streamcloneapplier):
699 699 raise error.Abort(b'stream clone bundles not supported')
700 700 else:
701 701 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
702 702
703 703 for fn, title in benches:
704 704 timer, fm = gettimer(ui, opts)
705 705 timer(fn, title=title)
706 706 fm.end()
707 707
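# A minimal standalone sketch of the read-size comparison above, assuming
# only the standard library and an arbitrary local file (the path passed in
# is a placeholder, not a real bundle).
import time

def benchread(path, size, runs=3):
    best = float('inf')
    for _ in range(runs):
        start = time.perf_counter()
        with open(path, 'rb') as fh:
            while fh.read(size):
                pass
        best = min(best, time.perf_counter() - start)
    return best

# for size in (8192, 16384, 32768, 131072):
#     print('read(%6d): %f' % (size, benchread('some-bundle', size)))
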
708 708 @command(b'perfchangegroupchangelog', formatteropts +
709 709 [(b'', b'version', b'02', b'changegroup version'),
710 710 (b'r', b'rev', b'', b'revisions to add to changegroup')])
711 711 def perfchangegroupchangelog(ui, repo, version=b'02', rev=None, **opts):
712 712 """Benchmark producing a changelog group for a changegroup.
713 713
714 714 This measures the time spent processing the changelog during a
715 715 bundle operation. This occurs during `hg bundle` and on a server
716 716 processing a `getbundle` wire protocol request (handles clones
717 717 and pull requests).
718 718
719 719 By default, all revisions are added to the changegroup.
720 720 """
721 721 opts = _byteskwargs(opts)
722 722 cl = repo.changelog
723 723 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
724 724 bundler = changegroup.getbundler(version, repo)
725 725
726 726 def d():
727 727 state, chunks = bundler._generatechangelog(cl, nodes)
728 728 for chunk in chunks:
729 729 pass
730 730
731 731 timer, fm = gettimer(ui, opts)
732 732
733 733 # Terminal printing can interfere with timing. So disable it.
734 734 with ui.configoverride({(b'progress', b'disable'): True}):
735 735 timer(d)
736 736
737 737 fm.end()
738 738
739 739 @command(b'perfdirs', formatteropts)
740 740 def perfdirs(ui, repo, **opts):
741 741 opts = _byteskwargs(opts)
742 742 timer, fm = gettimer(ui, opts)
743 743 dirstate = repo.dirstate
744 744 b'a' in dirstate
745 745 def d():
746 746 dirstate.hasdir(b'a')
747 747 del dirstate._map._dirs
748 748 timer(d)
749 749 fm.end()
750 750
751 751 @command(b'perfdirstate', formatteropts)
752 752 def perfdirstate(ui, repo, **opts):
753 753 opts = _byteskwargs(opts)
754 754 timer, fm = gettimer(ui, opts)
755 755 b"a" in repo.dirstate
756 756 def d():
757 757 repo.dirstate.invalidate()
758 758 b"a" in repo.dirstate
759 759 timer(d)
760 760 fm.end()
761 761
762 762 @command(b'perfdirstatedirs', formatteropts)
763 763 def perfdirstatedirs(ui, repo, **opts):
764 764 opts = _byteskwargs(opts)
765 765 timer, fm = gettimer(ui, opts)
766 766 b"a" in repo.dirstate
767 767 def d():
768 768 repo.dirstate.hasdir(b"a")
769 769 del repo.dirstate._map._dirs
770 770 timer(d)
771 771 fm.end()
772 772
773 773 @command(b'perfdirstatefoldmap', formatteropts)
774 774 def perfdirstatefoldmap(ui, repo, **opts):
775 775 opts = _byteskwargs(opts)
776 776 timer, fm = gettimer(ui, opts)
777 777 dirstate = repo.dirstate
778 778 b'a' in dirstate
779 779 def d():
780 780 dirstate._map.filefoldmap.get(b'a')
781 781 del dirstate._map.filefoldmap
782 782 timer(d)
783 783 fm.end()
784 784
785 785 @command(b'perfdirfoldmap', formatteropts)
786 786 def perfdirfoldmap(ui, repo, **opts):
787 787 opts = _byteskwargs(opts)
788 788 timer, fm = gettimer(ui, opts)
789 789 dirstate = repo.dirstate
790 790 b'a' in dirstate
791 791 def d():
792 792 dirstate._map.dirfoldmap.get(b'a')
793 793 del dirstate._map.dirfoldmap
794 794 del dirstate._map._dirs
795 795 timer(d)
796 796 fm.end()
797 797
798 798 @command(b'perfdirstatewrite', formatteropts)
799 799 def perfdirstatewrite(ui, repo, **opts):
800 800 opts = _byteskwargs(opts)
801 801 timer, fm = gettimer(ui, opts)
802 802 ds = repo.dirstate
803 803 b"a" in ds
804 804 def d():
805 805 ds._dirty = True
806 806 ds.write(repo.currenttransaction())
807 807 timer(d)
808 808 fm.end()
809 809
810 810 @command(b'perfmergecalculate',
811 811 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
812 812 def perfmergecalculate(ui, repo, rev, **opts):
813 813 opts = _byteskwargs(opts)
814 814 timer, fm = gettimer(ui, opts)
815 815 wctx = repo[None]
816 816 rctx = scmutil.revsingle(repo, rev, rev)
817 817 ancestor = wctx.ancestor(rctx)
818 818 # we don't want working dir files to be stat'd in the benchmark, so prime
819 819 # that cache
820 820 wctx.dirty()
821 821 def d():
822 822 # acceptremote is True because we don't want prompts in the middle of
823 823 # our benchmark
824 824 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
825 825 acceptremote=True, followcopies=True)
826 826 timer(d)
827 827 fm.end()
828 828
829 829 @command(b'perfpathcopies', [], b"REV REV")
830 830 def perfpathcopies(ui, repo, rev1, rev2, **opts):
831 831 opts = _byteskwargs(opts)
832 832 timer, fm = gettimer(ui, opts)
833 833 ctx1 = scmutil.revsingle(repo, rev1, rev1)
834 834 ctx2 = scmutil.revsingle(repo, rev2, rev2)
835 835 def d():
836 836 copies.pathcopies(ctx1, ctx2)
837 837 timer(d)
838 838 fm.end()
839 839
840 840 @command(b'perfphases',
841 841 [(b'', b'full', False, b'include file reading time too'),
842 842 ], b"")
843 843 def perfphases(ui, repo, **opts):
844 844 """benchmark phasesets computation"""
845 845 opts = _byteskwargs(opts)
846 846 timer, fm = gettimer(ui, opts)
847 847 _phases = repo._phasecache
848 848 full = opts.get(b'full')
849 849 def d():
850 850 phases = _phases
851 851 if full:
852 852 clearfilecache(repo, b'_phasecache')
853 853 phases = repo._phasecache
854 854 phases.invalidate()
855 855 phases.loadphaserevs(repo)
856 856 timer(d)
857 857 fm.end()
858 858
859 859 @command(b'perfphasesremote',
860 860 [], b"[DEST]")
861 861 def perfphasesremote(ui, repo, dest=None, **opts):
862 862 """benchmark time needed to analyse phases of the remote server"""
863 863 from mercurial.node import (
864 864 bin,
865 865 )
866 866 from mercurial import (
867 867 exchange,
868 868 hg,
869 869 phases,
870 870 )
871 871 opts = _byteskwargs(opts)
872 872 timer, fm = gettimer(ui, opts)
873 873
874 874 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
875 875 if not path:
876 876 raise error.Abort((b'default repository not configured!'),
877 877 hint=(b"see 'hg help config.paths'"))
878 878 dest = path.pushloc or path.loc
879 879 branches = (path.branch, opts.get(b'branch') or [])
880 880 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
881 881 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
882 882 other = hg.peer(repo, opts, dest)
883 883
884 884 # easier to perform discovery through the operation
885 885 op = exchange.pushoperation(repo, other)
886 886 exchange._pushdiscoverychangeset(op)
887 887
888 888 remotesubset = op.fallbackheads
889 889
890 890 with other.commandexecutor() as e:
891 891 remotephases = e.callcommand(b'listkeys',
892 892 {b'namespace': b'phases'}).result()
893 893 del other
894 894 publishing = remotephases.get(b'publishing', False)
895 895 if publishing:
896 896 ui.status((b'publishing: yes\n'))
897 897 else:
898 898 ui.status((b'publishing: no\n'))
899 899
900 900 nodemap = repo.changelog.nodemap
901 901 nonpublishroots = 0
902 902 for nhex, phase in remotephases.iteritems():
903 903 if nhex == b'publishing': # ignore data related to publish option
904 904 continue
905 905 node = bin(nhex)
906 906 if node in nodemap and int(phase):
907 907 nonpublishroots += 1
908 908 ui.status((b'number of roots: %d\n') % len(remotephases))
909 909 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
910 910 def d():
911 911 phases.remotephasessummary(repo,
912 912 remotesubset,
913 913 remotephases)
914 914 timer(d)
915 915 fm.end()
916 916
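# A small standalone sketch of the root counting above, assuming a
# listkeys-style mapping of hex node -> phase (plus the special
# 'publishing' entry) and a set of locally known hex nodes instead of the
# real changelog nodemap.
def countnonpublicroots(remotephases, knownnodes):
    count = 0
    for nhex, phase in remotephases.items():
        if nhex == 'publishing':    # ignore data related to publish option
            continue
        if nhex in knownnodes and int(phase):
            count += 1
    return count

# countnonpublicroots({'publishing': 'True', 'c0ffee': '1'}, {'c0ffee'}) -> 1
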
917 917 @command(b'perfmanifest',[
918 918 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
919 919 (b'', b'clear-disk', False, b'clear on-disk caches too'),
920 920 ] + formatteropts, b'REV|NODE')
921 921 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
922 922 """benchmark the time to read a manifest from disk and return a usable
923 923 dict-like object
924 924
925 925 Manifest caches are cleared before retrieval."""
926 926 opts = _byteskwargs(opts)
927 927 timer, fm = gettimer(ui, opts)
928 928 if not manifest_rev:
929 929 ctx = scmutil.revsingle(repo, rev, rev)
930 930 t = ctx.manifestnode()
931 931 else:
932 932 from mercurial.node import bin
933 933
934 934 if len(rev) == 40:
935 935 t = bin(rev)
936 936 else:
937 937 try:
938 938 rev = int(rev)
939 939
940 940 if util.safehasattr(repo.manifestlog, b'getstorage'):
941 941 t = repo.manifestlog.getstorage(b'').node(rev)
942 942 else:
943 943 t = repo.manifestlog._revlog.lookup(rev)
944 944 except ValueError:
945 945 raise error.Abort(b'manifest revision must be integer or full '
946 946 b'node')
947 947 def d():
948 948 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
949 949 repo.manifestlog[t].read()
950 950 timer(d)
951 951 fm.end()
952 952
953 953 @command(b'perfchangeset', formatteropts)
954 954 def perfchangeset(ui, repo, rev, **opts):
955 955 opts = _byteskwargs(opts)
956 956 timer, fm = gettimer(ui, opts)
957 957 n = scmutil.revsingle(repo, rev).node()
958 958 def d():
959 959 repo.changelog.read(n)
960 960 #repo.changelog._cache = None
961 961 timer(d)
962 962 fm.end()
963 963
964 964 @command(b'perfindex', formatteropts)
965 965 def perfindex(ui, repo, **opts):
966 966 import mercurial.revlog
967 967 opts = _byteskwargs(opts)
968 968 timer, fm = gettimer(ui, opts)
969 969 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
970 970 n = repo[b"tip"].node()
971 971 svfs = getsvfs(repo)
972 972 def d():
973 973 cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
974 974 cl.rev(n)
975 975 timer(d)
976 976 fm.end()
977 977
978 978 @command(b'perfstartup', formatteropts)
979 979 def perfstartup(ui, repo, **opts):
980 980 opts = _byteskwargs(opts)
981 981 timer, fm = gettimer(ui, opts)
982 982 def d():
983 983 if os.name != r'nt':
984 984 os.system(b"HGRCPATH= %s version -q > /dev/null" %
985 985 fsencode(sys.argv[0]))
986 986 else:
987 987 os.environ[r'HGRCPATH'] = r' '
988 988 os.system(r"%s version -q > NUL" % sys.argv[0])
989 989 timer(d)
990 990 fm.end()
991 991
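# A portable standalone sketch of the same measurement, assuming Python 3
# and the standard library: time how long a child `hg version -q` takes to
# start and exit, with HGRCPATH cleared so config loading does not skew
# the result.
import os
import subprocess
import time

def startuptime(argv=('hg', 'version', '-q'), runs=5):
    env = dict(os.environ, HGRCPATH='')
    best = float('inf')
    for _ in range(runs):
        start = time.perf_counter()
        subprocess.check_call(argv, env=env,
                              stdout=subprocess.DEVNULL,
                              stderr=subprocess.DEVNULL)
        best = min(best, time.perf_counter() - start)
    return best
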
992 992 @command(b'perfparents', formatteropts)
993 993 def perfparents(ui, repo, **opts):
994 994 opts = _byteskwargs(opts)
995 995 timer, fm = gettimer(ui, opts)
996 996 # control the number of commits perfparents iterates over
997 997 # experimental config: perf.parentscount
998 998 count = getint(ui, b"perf", b"parentscount", 1000)
999 999 if len(repo.changelog) < count:
1000 1000 raise error.Abort(b"repo needs %d commits for this test" % count)
1001 1001 repo = repo.unfiltered()
1002 1002 nl = [repo.changelog.node(i) for i in _xrange(count)]
1003 1003 def d():
1004 1004 for n in nl:
1005 1005 repo.changelog.parents(n)
1006 1006 timer(d)
1007 1007 fm.end()
1008 1008
1009 1009 @command(b'perfctxfiles', formatteropts)
1010 1010 def perfctxfiles(ui, repo, x, **opts):
1011 1011 opts = _byteskwargs(opts)
1012 1012 x = int(x)
1013 1013 timer, fm = gettimer(ui, opts)
1014 1014 def d():
1015 1015 len(repo[x].files())
1016 1016 timer(d)
1017 1017 fm.end()
1018 1018
1019 1019 @command(b'perfrawfiles', formatteropts)
1020 1020 def perfrawfiles(ui, repo, x, **opts):
1021 1021 opts = _byteskwargs(opts)
1022 1022 x = int(x)
1023 1023 timer, fm = gettimer(ui, opts)
1024 1024 cl = repo.changelog
1025 1025 def d():
1026 1026 len(cl.read(x)[3])
1027 1027 timer(d)
1028 1028 fm.end()
1029 1029
1030 1030 @command(b'perflookup', formatteropts)
1031 1031 def perflookup(ui, repo, rev, **opts):
1032 1032 opts = _byteskwargs(opts)
1033 1033 timer, fm = gettimer(ui, opts)
1034 1034 timer(lambda: len(repo.lookup(rev)))
1035 1035 fm.end()
1036 1036
1037 1037 @command(b'perflinelogedits',
1038 1038 [(b'n', b'edits', 10000, b'number of edits'),
1039 1039 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1040 1040 ], norepo=True)
1041 1041 def perflinelogedits(ui, **opts):
1042 1042 from mercurial import linelog
1043 1043
1044 1044 opts = _byteskwargs(opts)
1045 1045
1046 1046 edits = opts[b'edits']
1047 1047 maxhunklines = opts[b'max_hunk_lines']
1048 1048
1049 1049 maxb1 = 100000
1050 1050 random.seed(0)
1051 1051 randint = random.randint
1052 1052 currentlines = 0
1053 1053 arglist = []
1054 1054 for rev in _xrange(edits):
1055 1055 a1 = randint(0, currentlines)
1056 1056 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1057 1057 b1 = randint(0, maxb1)
1058 1058 b2 = randint(b1, b1 + maxhunklines)
1059 1059 currentlines += (b2 - b1) - (a2 - a1)
1060 1060 arglist.append((rev, a1, a2, b1, b2))
1061 1061
1062 1062 def d():
1063 1063 ll = linelog.linelog()
1064 1064 for args in arglist:
1065 1065 ll.replacelines(*args)
1066 1066
1067 1067 timer, fm = gettimer(ui, opts)
1068 1068 timer(d)
1069 1069 fm.end()
1070 1070
1071 1071 @command(b'perfrevrange', formatteropts)
1072 1072 def perfrevrange(ui, repo, *specs, **opts):
1073 1073 opts = _byteskwargs(opts)
1074 1074 timer, fm = gettimer(ui, opts)
1075 1075 revrange = scmutil.revrange
1076 1076 timer(lambda: len(revrange(repo, specs)))
1077 1077 fm.end()
1078 1078
1079 1079 @command(b'perfnodelookup', formatteropts)
1080 1080 def perfnodelookup(ui, repo, rev, **opts):
1081 1081 opts = _byteskwargs(opts)
1082 1082 timer, fm = gettimer(ui, opts)
1083 1083 import mercurial.revlog
1084 1084 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1085 1085 n = scmutil.revsingle(repo, rev).node()
1086 1086 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1087 1087 def d():
1088 1088 cl.rev(n)
1089 1089 clearcaches(cl)
1090 1090 timer(d)
1091 1091 fm.end()
1092 1092
1093 1093 @command(b'perflog',
1094 1094 [(b'', b'rename', False, b'ask log to follow renames')
1095 1095 ] + formatteropts)
1096 1096 def perflog(ui, repo, rev=None, **opts):
1097 1097 opts = _byteskwargs(opts)
1098 1098 if rev is None:
1099 1099 rev=[]
1100 1100 timer, fm = gettimer(ui, opts)
1101 1101 ui.pushbuffer()
1102 1102 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1103 1103 copies=opts.get(b'rename')))
1104 1104 ui.popbuffer()
1105 1105 fm.end()
1106 1106
1107 1107 @command(b'perfmoonwalk', formatteropts)
1108 1108 def perfmoonwalk(ui, repo, **opts):
1109 1109 """benchmark walking the changelog backwards
1110 1110
1111 1111 This also loads the changelog data for each revision in the changelog.
1112 1112 """
1113 1113 opts = _byteskwargs(opts)
1114 1114 timer, fm = gettimer(ui, opts)
1115 1115 def moonwalk():
1116 1116 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1117 1117 ctx = repo[i]
1118 1118 ctx.branch() # read changelog data (in addition to the index)
1119 1119 timer(moonwalk)
1120 1120 fm.end()
1121 1121
1122 1122 @command(b'perftemplating',
1123 1123 [(b'r', b'rev', [], b'revisions to run the template on'),
1124 1124 ] + formatteropts)
1125 1125 def perftemplating(ui, repo, testedtemplate=None, **opts):
1126 1126 """test the rendering time of a given template"""
1127 1127 if makelogtemplater is None:
1128 1128 raise error.Abort((b"perftemplating not available with this Mercurial"),
1129 1129 hint=b"use 4.3 or later")
1130 1130
1131 1131 opts = _byteskwargs(opts)
1132 1132
1133 1133 nullui = ui.copy()
1134 1134 nullui.fout = open(os.devnull, r'wb')
1135 1135 nullui.disablepager()
1136 1136 revs = opts.get(b'rev')
1137 1137 if not revs:
1138 1138 revs = [b'all()']
1139 1139 revs = list(scmutil.revrange(repo, revs))
1140 1140
1141 1141 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1142 1142 b' {author|person}: {desc|firstline}\n')
1143 1143 if testedtemplate is None:
1144 1144 testedtemplate = defaulttemplate
1145 1145 displayer = makelogtemplater(nullui, repo, testedtemplate)
1146 1146 def format():
1147 1147 for r in revs:
1148 1148 ctx = repo[r]
1149 1149 displayer.show(ctx)
1150 1150 displayer.flush(ctx)
1151 1151
1152 1152 timer, fm = gettimer(ui, opts)
1153 1153 timer(format)
1154 1154 fm.end()
1155 1155
1156 @command(b'perfhelper-tracecopies', formatteropts +
1157 [
1158 (b'r', b'revs', [], b'restrict search to these revisions'),
1159 ])
1160 def perfhelpertracecopies(ui, repo, revs=[], **opts):
1161 """find statistic about potential parameters for the `perftracecopies`
1162
1163 This command finds source-destination pairs relevant to copy tracing testing.
1164 It reports values for some of the parameters that impact copy tracing time.
1165 """
1166 opts = _byteskwargs(opts)
1167 fm = ui.formatter(b'perf', opts)
1168 header = '%12s %12s %12s %12s\n'
1169 output = ("%(source)12s %(destination)12s "
1170 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1171 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1172
1173 if not revs:
1174 revs = ['all()']
1175 revs = scmutil.revrange(repo, revs)
1176
1177 roi = repo.revs('merge() and %ld', revs)
1178 for r in roi:
1179 ctx = repo[r]
1180 p1 = ctx.p1().rev()
1181 p2 = ctx.p2().rev()
1182 bases = repo.changelog._commonancestorsheads(p1, p2)
1183 for p in (p1, p2):
1184 for b in bases:
1185 base = repo[b]
1186 parent = repo[p]
1187 missing = copies._computeforwardmissing(base, parent)
1188 if not missing:
1189 continue
1190 fm.startitem()
1191 data = {
1192 b'source': base.hex(),
1193 b'destination': parent.hex(),
1194 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1195 b'nbmissingfiles': len(missing),
1196 }
1197 fm.data(**data)
1198 out = data.copy()
1199 out['source'] = fm.hexfunc(base.node())
1200 out['destination'] = fm.hexfunc(parent.node())
1201 fm.plain(output % out)
1202 fm.end()
1203
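# A hedged helper sketch for consuming the table printed above, assuming the
# plain four-column output has been saved to a file (the file name used by
# the caller is a placeholder): pick the source/destination pairs with the
# most missing files, which are the interesting candidates for copy-tracing
# benchmarks such as `perfpathcopies` defined earlier in this file.
def heaviestcopypairs(path, limit=5):
    pairs = []
    with open(path) as fh:
        next(fh)                     # skip the header line
        for line in fh:
            source, destination, nbrevs, nbfiles = line.split()
            pairs.append((int(nbfiles), int(nbrevs), source, destination))
    pairs.sort(reverse=True)
    return pairs[:limit]

# heaviestcopypairs('tracecopies.txt') ->
#     [(nb-files, nb-revs, source, destination), ...]
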
1156 1204 @command(b'perfcca', formatteropts)
1157 1205 def perfcca(ui, repo, **opts):
1158 1206 opts = _byteskwargs(opts)
1159 1207 timer, fm = gettimer(ui, opts)
1160 1208 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1161 1209 fm.end()
1162 1210
1163 1211 @command(b'perffncacheload', formatteropts)
1164 1212 def perffncacheload(ui, repo, **opts):
1165 1213 opts = _byteskwargs(opts)
1166 1214 timer, fm = gettimer(ui, opts)
1167 1215 s = repo.store
1168 1216 def d():
1169 1217 s.fncache._load()
1170 1218 timer(d)
1171 1219 fm.end()
1172 1220
1173 1221 @command(b'perffncachewrite', formatteropts)
1174 1222 def perffncachewrite(ui, repo, **opts):
1175 1223 opts = _byteskwargs(opts)
1176 1224 timer, fm = gettimer(ui, opts)
1177 1225 s = repo.store
1178 1226 lock = repo.lock()
1179 1227 s.fncache._load()
1180 1228 tr = repo.transaction(b'perffncachewrite')
1181 1229 tr.addbackup(b'fncache')
1182 1230 def d():
1183 1231 s.fncache._dirty = True
1184 1232 s.fncache.write(tr)
1185 1233 timer(d)
1186 1234 tr.close()
1187 1235 lock.release()
1188 1236 fm.end()
1189 1237
1190 1238 @command(b'perffncacheencode', formatteropts)
1191 1239 def perffncacheencode(ui, repo, **opts):
1192 1240 opts = _byteskwargs(opts)
1193 1241 timer, fm = gettimer(ui, opts)
1194 1242 s = repo.store
1195 1243 s.fncache._load()
1196 1244 def d():
1197 1245 for p in s.fncache.entries:
1198 1246 s.encode(p)
1199 1247 timer(d)
1200 1248 fm.end()
1201 1249
1202 1250 def _bdiffworker(q, blocks, xdiff, ready, done):
1203 1251 while not done.is_set():
1204 1252 pair = q.get()
1205 1253 while pair is not None:
1206 1254 if xdiff:
1207 1255 mdiff.bdiff.xdiffblocks(*pair)
1208 1256 elif blocks:
1209 1257 mdiff.bdiff.blocks(*pair)
1210 1258 else:
1211 1259 mdiff.textdiff(*pair)
1212 1260 q.task_done()
1213 1261 pair = q.get()
1214 1262 q.task_done() # for the None one
1215 1263 with ready:
1216 1264 ready.wait()
1217 1265
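# A simplified standalone sketch of the queue-draining pattern above,
# assuming only the standard library (Python 3). The real _bdiffworker
# additionally parks its workers on a Condition between timed runs (and
# uses an Event for shutdown) so that thread startup is not part of the
# measurement; this sketch only shows the basic drain-until-None loop.
import queue as queuemod
import threading

NTHREADS = 4

def _drainworker(q):
    while True:
        item = q.get()
        if item is None:             # sentinel: no more work for this worker
            q.task_done()
            return
        item()                       # run one unit of work
        q.task_done()

def _drainexample():
    q = queuemod.Queue()
    workers = []
    for _ in range(NTHREADS):
        t = threading.Thread(target=_drainworker, args=(q,))
        t.start()
        workers.append(t)
    for _ in range(200):
        q.put(lambda: sum(range(10000)))
    for _ in range(NTHREADS):
        q.put(None)                  # one sentinel per worker
    q.join()                         # wait until everything is processed
    for t in workers:
        t.join()
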
1218 1266 def _manifestrevision(repo, mnode):
1219 1267 ml = repo.manifestlog
1220 1268
1221 1269 if util.safehasattr(ml, b'getstorage'):
1222 1270 store = ml.getstorage(b'')
1223 1271 else:
1224 1272 store = ml._revlog
1225 1273
1226 1274 return store.revision(mnode)
1227 1275
1228 1276 @command(b'perfbdiff', revlogopts + formatteropts + [
1229 1277 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1230 1278 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1231 1279 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
1232 1280 (b'', b'blocks', False, b'test computing diffs into blocks'),
1233 1281 (b'', b'xdiff', False, b'use xdiff algorithm'),
1234 1282 ],
1235 1283
1236 1284 b'-c|-m|FILE REV')
1237 1285 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1238 1286 """benchmark a bdiff between revisions
1239 1287
1240 1288 By default, benchmark a bdiff between its delta parent and itself.
1241 1289
1242 1290 With ``--count``, benchmark bdiffs between delta parents and self for N
1243 1291 revisions starting at the specified revision.
1244 1292
1245 1293 With ``--alldata``, assume the requested revision is a changeset and
1246 1294 measure bdiffs for all changes related to that changeset (manifest
1247 1295 and filelogs).
1248 1296 """
1249 1297 opts = _byteskwargs(opts)
1250 1298
1251 1299 if opts[b'xdiff'] and not opts[b'blocks']:
1252 1300 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1253 1301
1254 1302 if opts[b'alldata']:
1255 1303 opts[b'changelog'] = True
1256 1304
1257 1305 if opts.get(b'changelog') or opts.get(b'manifest'):
1258 1306 file_, rev = None, file_
1259 1307 elif rev is None:
1260 1308 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1261 1309
1262 1310 blocks = opts[b'blocks']
1263 1311 xdiff = opts[b'xdiff']
1264 1312 textpairs = []
1265 1313
1266 1314 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1267 1315
1268 1316 startrev = r.rev(r.lookup(rev))
1269 1317 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1270 1318 if opts[b'alldata']:
1271 1319 # Load revisions associated with changeset.
1272 1320 ctx = repo[rev]
1273 1321 mtext = _manifestrevision(repo, ctx.manifestnode())
1274 1322 for pctx in ctx.parents():
1275 1323 pman = _manifestrevision(repo, pctx.manifestnode())
1276 1324 textpairs.append((pman, mtext))
1277 1325
1278 1326 # Load filelog revisions by iterating manifest delta.
1279 1327 man = ctx.manifest()
1280 1328 pman = ctx.p1().manifest()
1281 1329 for filename, change in pman.diff(man).items():
1282 1330 fctx = repo.file(filename)
1283 1331 f1 = fctx.revision(change[0][0] or -1)
1284 1332 f2 = fctx.revision(change[1][0] or -1)
1285 1333 textpairs.append((f1, f2))
1286 1334 else:
1287 1335 dp = r.deltaparent(rev)
1288 1336 textpairs.append((r.revision(dp), r.revision(rev)))
1289 1337
1290 1338 withthreads = threads > 0
1291 1339 if not withthreads:
1292 1340 def d():
1293 1341 for pair in textpairs:
1294 1342 if xdiff:
1295 1343 mdiff.bdiff.xdiffblocks(*pair)
1296 1344 elif blocks:
1297 1345 mdiff.bdiff.blocks(*pair)
1298 1346 else:
1299 1347 mdiff.textdiff(*pair)
1300 1348 else:
1301 1349 q = queue()
1302 1350 for i in _xrange(threads):
1303 1351 q.put(None)
1304 1352 ready = threading.Condition()
1305 1353 done = threading.Event()
1306 1354 for i in _xrange(threads):
1307 1355 threading.Thread(target=_bdiffworker,
1308 1356 args=(q, blocks, xdiff, ready, done)).start()
1309 1357 q.join()
1310 1358 def d():
1311 1359 for pair in textpairs:
1312 1360 q.put(pair)
1313 1361 for i in _xrange(threads):
1314 1362 q.put(None)
1315 1363 with ready:
1316 1364 ready.notify_all()
1317 1365 q.join()
1318 1366 timer, fm = gettimer(ui, opts)
1319 1367 timer(d)
1320 1368 fm.end()
1321 1369
1322 1370 if withthreads:
1323 1371 done.set()
1324 1372 for i in _xrange(threads):
1325 1373 q.put(None)
1326 1374 with ready:
1327 1375 ready.notify_all()
1328 1376
1329 1377 @command(b'perfunidiff', revlogopts + formatteropts + [
1330 1378 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1331 1379 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1332 1380 ], b'-c|-m|FILE REV')
1333 1381 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1334 1382 """benchmark a unified diff between revisions
1335 1383
1336 1384 This doesn't include any copy tracing - it's just a unified diff
1337 1385 of the texts.
1338 1386
1339 1387 By default, benchmark a diff between its delta parent and itself.
1340 1388
1341 1389 With ``--count``, benchmark diffs between delta parents and self for N
1342 1390 revisions starting at the specified revision.
1343 1391
1344 1392 With ``--alldata``, assume the requested revision is a changeset and
1345 1393 measure diffs for all changes related to that changeset (manifest
1346 1394 and filelogs).
1347 1395 """
1348 1396 opts = _byteskwargs(opts)
1349 1397 if opts[b'alldata']:
1350 1398 opts[b'changelog'] = True
1351 1399
1352 1400 if opts.get(b'changelog') or opts.get(b'manifest'):
1353 1401 file_, rev = None, file_
1354 1402 elif rev is None:
1355 1403 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1356 1404
1357 1405 textpairs = []
1358 1406
1359 1407 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1360 1408
1361 1409 startrev = r.rev(r.lookup(rev))
1362 1410 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1363 1411 if opts[b'alldata']:
1364 1412 # Load revisions associated with changeset.
1365 1413 ctx = repo[rev]
1366 1414 mtext = _manifestrevision(repo, ctx.manifestnode())
1367 1415 for pctx in ctx.parents():
1368 1416 pman = _manifestrevision(repo, pctx.manifestnode())
1369 1417 textpairs.append((pman, mtext))
1370 1418
1371 1419 # Load filelog revisions by iterating manifest delta.
1372 1420 man = ctx.manifest()
1373 1421 pman = ctx.p1().manifest()
1374 1422 for filename, change in pman.diff(man).items():
1375 1423 fctx = repo.file(filename)
1376 1424 f1 = fctx.revision(change[0][0] or -1)
1377 1425 f2 = fctx.revision(change[1][0] or -1)
1378 1426 textpairs.append((f1, f2))
1379 1427 else:
1380 1428 dp = r.deltaparent(rev)
1381 1429 textpairs.append((r.revision(dp), r.revision(rev)))
1382 1430
1383 1431 def d():
1384 1432 for left, right in textpairs:
1385 1433 # The date strings don't matter, so we pass empty strings.
1386 1434 headerlines, hunks = mdiff.unidiff(
1387 1435 left, b'', right, b'', b'left', b'right', binary=False)
1388 1436 # consume iterators in roughly the way patch.py does
1389 1437 b'\n'.join(headerlines)
1390 1438 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1391 1439 timer, fm = gettimer(ui, opts)
1392 1440 timer(d)
1393 1441 fm.end()
1394 1442
1395 1443 @command(b'perfdiffwd', formatteropts)
1396 1444 def perfdiffwd(ui, repo, **opts):
1397 1445 """Profile diff of working directory changes"""
1398 1446 opts = _byteskwargs(opts)
1399 1447 timer, fm = gettimer(ui, opts)
1400 1448 options = {
1401 1449 'w': 'ignore_all_space',
1402 1450 'b': 'ignore_space_change',
1403 1451 'B': 'ignore_blank_lines',
1404 1452 }
1405 1453
1406 1454 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1407 1455 opts = dict((options[c], b'1') for c in diffopt)
1408 1456 def d():
1409 1457 ui.pushbuffer()
1410 1458 commands.diff(ui, repo, **opts)
1411 1459 ui.popbuffer()
1412 1460 diffopt = diffopt.encode('ascii')
1413 1461 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1414 1462 timer(d, title=title)
1415 1463 fm.end()
1416 1464
1417 1465 @command(b'perfrevlogindex', revlogopts + formatteropts,
1418 1466 b'-c|-m|FILE')
1419 1467 def perfrevlogindex(ui, repo, file_=None, **opts):
1420 1468 """Benchmark operations against a revlog index.
1421 1469
1422 1470 This tests constructing a revlog instance, reading index data,
1423 1471 parsing index data, and performing various operations related to
1424 1472 index data.
1425 1473 """
1426 1474
1427 1475 opts = _byteskwargs(opts)
1428 1476
1429 1477 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1430 1478
1431 1479 opener = getattr(rl, 'opener') # trick linter
1432 1480 indexfile = rl.indexfile
1433 1481 data = opener.read(indexfile)
1434 1482
1435 1483 header = struct.unpack(b'>I', data[0:4])[0]
1436 1484 version = header & 0xFFFF
1437 1485 if version == 1:
1438 1486 revlogio = revlog.revlogio()
1439 1487 inline = header & (1 << 16)
1440 1488 else:
1441 1489 raise error.Abort((b'unsupported revlog version: %d') % version)
1442 1490
1443 1491 rllen = len(rl)
1444 1492
1445 1493 node0 = rl.node(0)
1446 1494 node25 = rl.node(rllen // 4)
1447 1495 node50 = rl.node(rllen // 2)
1448 1496 node75 = rl.node(rllen // 4 * 3)
1449 1497 node100 = rl.node(rllen - 1)
1450 1498
1451 1499 allrevs = range(rllen)
1452 1500 allrevsrev = list(reversed(allrevs))
1453 1501 allnodes = [rl.node(rev) for rev in range(rllen)]
1454 1502 allnodesrev = list(reversed(allnodes))
1455 1503
1456 1504 def constructor():
1457 1505 revlog.revlog(opener, indexfile)
1458 1506
1459 1507 def read():
1460 1508 with opener(indexfile) as fh:
1461 1509 fh.read()
1462 1510
1463 1511 def parseindex():
1464 1512 revlogio.parseindex(data, inline)
1465 1513
1466 1514 def getentry(revornode):
1467 1515 index = revlogio.parseindex(data, inline)[0]
1468 1516 index[revornode]
1469 1517
1470 1518 def getentries(revs, count=1):
1471 1519 index = revlogio.parseindex(data, inline)[0]
1472 1520
1473 1521 for i in range(count):
1474 1522 for rev in revs:
1475 1523 index[rev]
1476 1524
1477 1525 def resolvenode(node):
1478 1526 nodemap = revlogio.parseindex(data, inline)[1]
1479 1527 # This only works for the C code.
1480 1528 if nodemap is None:
1481 1529 return
1482 1530
1483 1531 try:
1484 1532 nodemap[node]
1485 1533 except error.RevlogError:
1486 1534 pass
1487 1535
1488 1536 def resolvenodes(nodes, count=1):
1489 1537 nodemap = revlogio.parseindex(data, inline)[1]
1490 1538 if nodemap is None:
1491 1539 return
1492 1540
1493 1541 for i in range(count):
1494 1542 for node in nodes:
1495 1543 try:
1496 1544 nodemap[node]
1497 1545 except error.RevlogError:
1498 1546 pass
1499 1547
1500 1548 benches = [
1501 1549 (constructor, b'revlog constructor'),
1502 1550 (read, b'read'),
1503 1551 (parseindex, b'create index object'),
1504 1552 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1505 1553 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1506 1554 (lambda: resolvenode(node0), b'look up node at rev 0'),
1507 1555 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1508 1556 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1509 1557 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1510 1558 (lambda: resolvenode(node100), b'look up node at tip'),
1511 1559 # 2x variation is to measure caching impact.
1512 1560 (lambda: resolvenodes(allnodes),
1513 1561 b'look up all nodes (forward)'),
1514 1562 (lambda: resolvenodes(allnodes, 2),
1515 1563 b'look up all nodes 2x (forward)'),
1516 1564 (lambda: resolvenodes(allnodesrev),
1517 1565 b'look up all nodes (reverse)'),
1518 1566 (lambda: resolvenodes(allnodesrev, 2),
1519 1567 b'look up all nodes 2x (reverse)'),
1520 1568 (lambda: getentries(allrevs),
1521 1569 b'retrieve all index entries (forward)'),
1522 1570 (lambda: getentries(allrevs, 2),
1523 1571 b'retrieve all index entries 2x (forward)'),
1524 1572 (lambda: getentries(allrevsrev),
1525 1573 b'retrieve all index entries (reverse)'),
1526 1574 (lambda: getentries(allrevsrev, 2),
1527 1575 b'retrieve all index entries 2x (reverse)'),
1528 1576 ]
1529 1577
1530 1578 for fn, title in benches:
1531 1579 timer, fm = gettimer(ui, opts)
1532 1580 timer(fn, title=title)
1533 1581 fm.end()
1534 1582
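# A small standalone sketch of the header decoding used above, assuming raw
# revlog index bytes: the first 4 bytes pack the flags in the high 16 bits
# and the revlog version in the low 16 bits.
import struct

REVLOGV1 = 1
FLAG_INLINE_DATA = 1 << 16

def parseheader(data):
    header = struct.unpack('>I', data[:4])[0]
    version = header & 0xFFFF
    inline = bool(header & FLAG_INLINE_DATA)
    return version, inline

# parseheader(b'\x00\x01\x00\x01') -> (1, True)
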
1535 1583 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1536 1584 [(b'd', b'dist', 100, b'distance between the revisions'),
1537 1585 (b's', b'startrev', 0, b'revision to start reading at'),
1538 1586 (b'', b'reverse', False, b'read in reverse')],
1539 1587 b'-c|-m|FILE')
1540 1588 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1541 1589 **opts):
1542 1590 """Benchmark reading a series of revisions from a revlog.
1543 1591
1544 1592 By default, we read every ``-d/--dist`` revision from 0 to tip of
1545 1593 the specified revlog.
1546 1594
1547 1595 The start revision can be defined via ``-s/--startrev``.
1548 1596 """
1549 1597 opts = _byteskwargs(opts)
1550 1598
1551 1599 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1552 1600 rllen = getlen(ui)(rl)
1553 1601
1554 1602 if startrev < 0:
1555 1603 startrev = rllen + startrev
1556 1604
1557 1605 def d():
1558 1606 rl.clearcaches()
1559 1607
1560 1608 beginrev = startrev
1561 1609 endrev = rllen
1562 1610 dist = opts[b'dist']
1563 1611
1564 1612 if reverse:
1565 1613 beginrev, endrev = endrev - 1, beginrev - 1
1566 1614 dist = -1 * dist
1567 1615
1568 1616 for x in _xrange(beginrev, endrev, dist):
1569 1617 # Old revisions don't support passing int.
1570 1618 n = rl.node(x)
1571 1619 rl.revision(n)
1572 1620
1573 1621 timer, fm = gettimer(ui, opts)
1574 1622 timer(d)
1575 1623 fm.end()
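# Illustrative invocation (sketch; the distance is arbitrary): read every 100th
# manifest revision, walking from tip back towards revision 0:
#
#   $ hg perfrevlogrevisions -m --dist 100 --reverse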
1576 1624
1577 1625 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1578 1626 [(b's', b'startrev', 1000, b'revision to start writing at'),
1579 1627 (b'', b'stoprev', -1, b'last revision to write'),
1580 1628 (b'', b'count', 3, b'number of passes to perform'),
1581 1629 (b'', b'details', False, b'print timing for every revision tested'),
1582 1630 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
1583 1631 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1584 1632 ],
1585 1633 b'-c|-m|FILE')
1586 1634 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1587 1635 """Benchmark writing a series of revisions to a revlog.
1588 1636
1589 1637 Possible source values are:
1590 1638 * `full`: add from a full text (default).
1591 1639 * `parent-1`: add from a delta to the first parent
1592 1640 * `parent-2`: add from a delta to the second parent if it exists
1593 1641 (use a delta from the first parent otherwise)
1594 1642 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1595 1643 * `storage`: add from the existing precomputed deltas
1596 1644 """
1597 1645 opts = _byteskwargs(opts)
1598 1646
1599 1647 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1600 1648 rllen = getlen(ui)(rl)
1601 1649 if startrev < 0:
1602 1650 startrev = rllen + startrev
1603 1651 if stoprev < 0:
1604 1652 stoprev = rllen + stoprev
1605 1653
1606 1654 lazydeltabase = opts['lazydeltabase']
1607 1655 source = opts['source']
1608 1656 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1609 1657 b'storage')
1610 1658 if source not in validsource:
1611 1659 raise error.Abort('invalid source type: %s' % source)
1612 1660
1613 1661 ### actually gather results
1614 1662 count = opts['count']
1615 1663 if count <= 0:
1616 1664 raise error.Abort('invalid run count: %d' % count)
1617 1665 allresults = []
1618 1666 for c in range(count):
1619 1667 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1620 1668 lazydeltabase=lazydeltabase)
1621 1669 allresults.append(timing)
1622 1670
1623 1671 ### consolidate the results in a single list
1624 1672 results = []
1625 1673 for idx, (rev, t) in enumerate(allresults[0]):
1626 1674 ts = [t]
1627 1675 for other in allresults[1:]:
1628 1676 orev, ot = other[idx]
1629 1677 assert orev == rev
1630 1678 ts.append(ot)
1631 1679 results.append((rev, ts))
1632 1680 resultcount = len(results)
1633 1681
1634 1682 ### Compute and display relevant statistics
1635 1683
1636 1684 # get a formatter
1637 1685 fm = ui.formatter(b'perf', opts)
1638 1686 displayall = ui.configbool(b"perf", b"all-timing", False)
1639 1687
1640 1688 # print individual details if requested
1641 1689 if opts['details']:
1642 1690 for idx, item in enumerate(results, 1):
1643 1691 rev, data = item
1644 1692 title = 'revision #%d of %d, rev %d' % (idx, resultcount, rev)
1645 1693 formatone(fm, data, title=title, displayall=displayall)
1646 1694
1647 1695 # sorts results by median time
1648 1696 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1649 1697 # list of (name, index) to display
1650 1698 relevants = [
1651 1699 ("min", 0),
1652 1700 ("10%", resultcount * 10 // 100),
1653 1701 ("25%", resultcount * 25 // 100),
1654 1702 ("50%", resultcount * 50 // 100),
1655 1703 ("75%", resultcount * 75 // 100),
1656 1704 ("90%", resultcount * 90 // 100),
1657 1705 ("95%", resultcount * 95 // 100),
1658 1706 ("99%", resultcount * 99 // 100),
1659 1707 ("max", -1),
1660 1708 ]
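# Note: with, say, 100 consolidated results, the entries above select
# indices 0, 10, 25, 50, 75, 90, 95, 99 and -1 of the list sorted by
# median time per revision.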
1661 1709 if not ui.quiet:
1662 1710 for name, idx in relevants:
1663 1711 data = results[idx]
1664 1712 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1665 1713 formatone(fm, data[1], title=title, displayall=displayall)
1666 1714
1667 1715 # XXX summing that many floats will not be very precise; we ignore
1668 1716 # this fact for now
1669 1717 totaltime = []
1670 1718 for item in allresults:
1671 1719 totaltime.append((sum(x[1][0] for x in item),
1672 1720 sum(x[1][1] for x in item),
1673 1721 sum(x[1][2] for x in item),)
1674 1722 )
1675 1723 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1676 1724 displayall=displayall)
1677 1725 fm.end()
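# Illustrative invocation (sketch; option values are arbitrary): rewrite the
# manifest revlog from its default start revision, deltaing against the
# smallest parent and printing per-revision timings:
#
#   $ hg perfrevlogwrite -m --source parent-smallest --details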
1678 1726
1679 1727 class _faketr(object):
1680 1728 def add(s, x, y, z=None):
1681 1729 return None
1682 1730
1683 1731 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
1684 1732 lazydeltabase=True):
1685 1733 timings = []
1686 1734 tr = _faketr()
1687 1735 with _temprevlog(ui, orig, startrev) as dest:
1688 1736 dest._lazydeltabase = lazydeltabase
1689 1737 revs = list(orig.revs(startrev, stoprev))
1690 1738 total = len(revs)
1691 1739 topic = 'adding'
1692 1740 if runidx is not None:
1693 1741 topic += ' (run #%d)' % runidx
1694 1742 for idx, rev in enumerate(revs):
1695 1743 ui.progress(topic, idx, unit='revs', total=total)
1696 1744 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
1697 1745 with timeone() as r:
1698 1746 dest.addrawrevision(*addargs, **addkwargs)
1699 1747 timings.append((rev, r[0]))
1700 1748 ui.progress(topic, total, unit='revs', total=total)
1701 1749 ui.progress(topic, None, unit='revs', total=total)
1702 1750 return timings
1703 1751
1704 1752 def _getrevisionseed(orig, rev, tr, source):
1705 1753 from mercurial.node import nullid
1706 1754
1707 1755 linkrev = orig.linkrev(rev)
1708 1756 node = orig.node(rev)
1709 1757 p1, p2 = orig.parents(node)
1710 1758 flags = orig.flags(rev)
1711 1759 cachedelta = None
1712 1760 text = None
1713 1761
1714 1762 if source == b'full':
1715 1763 text = orig.revision(rev)
1716 1764 elif source == b'parent-1':
1717 1765 baserev = orig.rev(p1)
1718 1766 cachedelta = (baserev, orig.revdiff(p1, rev))
1719 1767 elif source == b'parent-2':
1720 1768 parent = p2
1721 1769 if p2 == nullid:
1722 1770 parent = p1
1723 1771 baserev = orig.rev(parent)
1724 1772 cachedelta = (baserev, orig.revdiff(parent, rev))
1725 1773 elif source == b'parent-smallest':
1726 1774 p1diff = orig.revdiff(p1, rev)
1727 1775 parent = p1
1728 1776 diff = p1diff
1729 1777 if p2 != nullid:
1730 1778 p2diff = orig.revdiff(p2, rev)
1731 1779 if len(p1diff) > len(p2diff):
1732 1780 parent = p2
1733 1781 diff = p2diff
1734 1782 baserev = orig.rev(parent)
1735 1783 cachedelta = (baserev, diff)
1736 1784 elif source == b'storage':
1737 1785 baserev = orig.deltaparent(rev)
1738 1786 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
1739 1787
1740 1788 return ((text, tr, linkrev, p1, p2),
1741 1789 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1742 1790
1743 1791 @contextlib.contextmanager
1744 1792 def _temprevlog(ui, orig, truncaterev):
1745 1793 from mercurial import vfs as vfsmod
1746 1794
1747 1795 if orig._inline:
1748 1796 raise error.Abort('not supporting inline revlog (yet)')
1749 1797
1750 1798 origindexpath = orig.opener.join(orig.indexfile)
1751 1799 origdatapath = orig.opener.join(orig.datafile)
1752 1800 indexname = 'revlog.i'
1753 1801 dataname = 'revlog.d'
1754 1802
1755 1803 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
1756 1804 try:
1757 1805 # copy the revlog files to a temporary directory
1758 1806 ui.debug('copying data in %s\n' % tmpdir)
1759 1807 destindexpath = os.path.join(tmpdir, 'revlog.i')
1760 1808 destdatapath = os.path.join(tmpdir, 'revlog.d')
1761 1809 shutil.copyfile(origindexpath, destindexpath)
1762 1810 shutil.copyfile(origdatapath, destdatapath)
1763 1811
1764 1812 # remove the data we want to add again
1765 1813 ui.debug('truncating data to be rewritten\n')
1766 1814 with open(destindexpath, 'ab') as index:
1767 1815 index.seek(0)
1768 1816 index.truncate(truncaterev * orig._io.size)
1769 1817 with open(destdatapath, 'ab') as data:
1770 1818 data.seek(0)
1771 1819 data.truncate(orig.start(truncaterev))
1772 1820
1773 1821 # instantiate a new revlog from the temporary copy
1774 1822 ui.debug('recreating the revlog from the truncated copy\n')
1775 1823 vfs = vfsmod.vfs(tmpdir)
1776 1824 vfs.options = getattr(orig.opener, 'options', None)
1777 1825
1778 1826 dest = revlog.revlog(vfs,
1779 1827 indexfile=indexname,
1780 1828 datafile=dataname)
1781 1829 if dest._inline:
1782 1830 raise error.Abort('not supporting inline revlog (yet)')
1783 1831 # make sure internals are initialized
1784 1832 dest.revision(len(dest) - 1)
1785 1833 yield dest
1786 1834 del dest, vfs
1787 1835 finally:
1788 1836 shutil.rmtree(tmpdir, True)
1789 1837
1790 1838 @command(b'perfrevlogchunks', revlogopts + formatteropts +
1791 1839 [(b'e', b'engines', b'', b'compression engines to use'),
1792 1840 (b's', b'startrev', 0, b'revision to start at')],
1793 1841 b'-c|-m|FILE')
1794 1842 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
1795 1843 """Benchmark operations on revlog chunks.
1796 1844
1797 1845 Logically, each revlog is a collection of fulltext revisions. However,
1798 1846 stored within each revlog are "chunks" of possibly compressed data. This
1799 1847 data needs to be read and decompressed or compressed and written.
1800 1848
1801 1849 This command measures the time it takes to read+decompress and recompress
1802 1850 chunks in a revlog. It effectively isolates I/O and compression performance.
1803 1851 For measurements of higher-level operations like resolving revisions,
1804 1852 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
1805 1853 """
1806 1854 opts = _byteskwargs(opts)
1807 1855
1808 1856 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
1809 1857
1810 1858 # _chunkraw was renamed to _getsegmentforrevs.
1811 1859 try:
1812 1860 segmentforrevs = rl._getsegmentforrevs
1813 1861 except AttributeError:
1814 1862 segmentforrevs = rl._chunkraw
1815 1863
1816 1864 # Verify engines argument.
1817 1865 if engines:
1818 1866 engines = set(e.strip() for e in engines.split(b','))
1819 1867 for engine in engines:
1820 1868 try:
1821 1869 util.compressionengines[engine]
1822 1870 except KeyError:
1823 1871 raise error.Abort(b'unknown compression engine: %s' % engine)
1824 1872 else:
1825 1873 engines = []
1826 1874 for e in util.compengines:
1827 1875 engine = util.compengines[e]
1828 1876 try:
1829 1877 if engine.available():
1830 1878 engine.revlogcompressor().compress(b'dummy')
1831 1879 engines.append(e)
1832 1880 except NotImplementedError:
1833 1881 pass
1834 1882
1835 1883 revs = list(rl.revs(startrev, len(rl) - 1))
1836 1884
1837 1885 def rlfh(rl):
1838 1886 if rl._inline:
1839 1887 return getsvfs(repo)(rl.indexfile)
1840 1888 else:
1841 1889 return getsvfs(repo)(rl.datafile)
1842 1890
1843 1891 def doread():
1844 1892 rl.clearcaches()
1845 1893 for rev in revs:
1846 1894 segmentforrevs(rev, rev)
1847 1895
1848 1896 def doreadcachedfh():
1849 1897 rl.clearcaches()
1850 1898 fh = rlfh(rl)
1851 1899 for rev in revs:
1852 1900 segmentforrevs(rev, rev, df=fh)
1853 1901
1854 1902 def doreadbatch():
1855 1903 rl.clearcaches()
1856 1904 segmentforrevs(revs[0], revs[-1])
1857 1905
1858 1906 def doreadbatchcachedfh():
1859 1907 rl.clearcaches()
1860 1908 fh = rlfh(rl)
1861 1909 segmentforrevs(revs[0], revs[-1], df=fh)
1862 1910
1863 1911 def dochunk():
1864 1912 rl.clearcaches()
1865 1913 fh = rlfh(rl)
1866 1914 for rev in revs:
1867 1915 rl._chunk(rev, df=fh)
1868 1916
1869 1917 chunks = [None]
1870 1918
1871 1919 def dochunkbatch():
1872 1920 rl.clearcaches()
1873 1921 fh = rlfh(rl)
1874 1922 # Save chunks as a side-effect.
1875 1923 chunks[0] = rl._chunks(revs, df=fh)
1876 1924
1877 1925 def docompress(compressor):
1878 1926 rl.clearcaches()
1879 1927
1880 1928 try:
1881 1929 # Swap in the requested compression engine.
1882 1930 oldcompressor = rl._compressor
1883 1931 rl._compressor = compressor
1884 1932 for chunk in chunks[0]:
1885 1933 rl.compress(chunk)
1886 1934 finally:
1887 1935 rl._compressor = oldcompressor
1888 1936
1889 1937 benches = [
1890 1938 (lambda: doread(), b'read'),
1891 1939 (lambda: doreadcachedfh(), b'read w/ reused fd'),
1892 1940 (lambda: doreadbatch(), b'read batch'),
1893 1941 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
1894 1942 (lambda: dochunk(), b'chunk'),
1895 1943 (lambda: dochunkbatch(), b'chunk batch'),
1896 1944 ]
1897 1945
1898 1946 for engine in sorted(engines):
1899 1947 compressor = util.compengines[engine].revlogcompressor()
1900 1948 benches.append((functools.partial(docompress, compressor),
1901 1949 b'compress w/ %s' % engine))
1902 1950
1903 1951 for fn, title in benches:
1904 1952 timer, fm = gettimer(ui, opts)
1905 1953 timer(fn, title=title)
1906 1954 fm.end()
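# Illustrative invocation (sketch; the engine list is only an example): time
# chunk reads and recompression of the changelog with the zlib engine:
#
#   $ hg perfrevlogchunks -c --engines zlib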
1907 1955
1908 1956 @command(b'perfrevlogrevision', revlogopts + formatteropts +
1909 1957 [(b'', b'cache', False, b'use caches instead of clearing')],
1910 1958 b'-c|-m|FILE REV')
1911 1959 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
1912 1960 """Benchmark obtaining a revlog revision.
1913 1961
1914 1962 Obtaining a revlog revision consists of roughly the following steps:
1915 1963
1916 1964 1. Compute the delta chain
1917 1965 2. Slice the delta chain if applicable
1918 1966 3. Obtain the raw chunks for that delta chain
1919 1967 4. Decompress each raw chunk
1920 1968 5. Apply binary patches to obtain fulltext
1921 1969 6. Verify hash of fulltext
1922 1970
1923 1971 This command measures the time spent in each of these phases.
1924 1972 """
1925 1973 opts = _byteskwargs(opts)
1926 1974
1927 1975 if opts.get(b'changelog') or opts.get(b'manifest'):
1928 1976 file_, rev = None, file_
1929 1977 elif rev is None:
1930 1978 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
1931 1979
1932 1980 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
1933 1981
1934 1982 # _chunkraw was renamed to _getsegmentforrevs.
1935 1983 try:
1936 1984 segmentforrevs = r._getsegmentforrevs
1937 1985 except AttributeError:
1938 1986 segmentforrevs = r._chunkraw
1939 1987
1940 1988 node = r.lookup(rev)
1941 1989 rev = r.rev(node)
1942 1990
1943 1991 def getrawchunks(data, chain):
1944 1992 start = r.start
1945 1993 length = r.length
1946 1994 inline = r._inline
1947 1995 iosize = r._io.size
1948 1996 buffer = util.buffer
1949 1997
1950 1998 chunks = []
1951 1999 ladd = chunks.append
1952 2000 for idx, item in enumerate(chain):
1953 2001 offset = start(item[0])
1954 2002 bits = data[idx]
1955 2003 for rev in item:
1956 2004 chunkstart = start(rev)
1957 2005 if inline:
1958 2006 chunkstart += (rev + 1) * iosize
1959 2007 chunklength = length(rev)
1960 2008 ladd(buffer(bits, chunkstart - offset, chunklength))
1961 2009
1962 2010 return chunks
1963 2011
1964 2012 def dodeltachain(rev):
1965 2013 if not cache:
1966 2014 r.clearcaches()
1967 2015 r._deltachain(rev)
1968 2016
1969 2017 def doread(chain):
1970 2018 if not cache:
1971 2019 r.clearcaches()
1972 2020 for item in slicedchain:
1973 2021 segmentforrevs(item[0], item[-1])
1974 2022
1975 2023 def doslice(r, chain, size):
1976 2024 for s in slicechunk(r, chain, targetsize=size):
1977 2025 pass
1978 2026
1979 2027 def dorawchunks(data, chain):
1980 2028 if not cache:
1981 2029 r.clearcaches()
1982 2030 getrawchunks(data, chain)
1983 2031
1984 2032 def dodecompress(chunks):
1985 2033 decomp = r.decompress
1986 2034 for chunk in chunks:
1987 2035 decomp(chunk)
1988 2036
1989 2037 def dopatch(text, bins):
1990 2038 if not cache:
1991 2039 r.clearcaches()
1992 2040 mdiff.patches(text, bins)
1993 2041
1994 2042 def dohash(text):
1995 2043 if not cache:
1996 2044 r.clearcaches()
1997 2045 r.checkhash(text, node, rev=rev)
1998 2046
1999 2047 def dorevision():
2000 2048 if not cache:
2001 2049 r.clearcaches()
2002 2050 r.revision(node)
2003 2051
2004 2052 try:
2005 2053 from mercurial.revlogutils.deltas import slicechunk
2006 2054 except ImportError:
2007 2055 slicechunk = getattr(revlog, '_slicechunk', None)
2008 2056
2009 2057 size = r.length(rev)
2010 2058 chain = r._deltachain(rev)[0]
2011 2059 if not getattr(r, '_withsparseread', False):
2012 2060 slicedchain = (chain,)
2013 2061 else:
2014 2062 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2015 2063 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2016 2064 rawchunks = getrawchunks(data, slicedchain)
2017 2065 bins = r._chunks(chain)
2018 2066 text = bytes(bins[0])
2019 2067 bins = bins[1:]
2020 2068 text = mdiff.patches(text, bins)
2021 2069
2022 2070 benches = [
2023 2071 (lambda: dorevision(), b'full'),
2024 2072 (lambda: dodeltachain(rev), b'deltachain'),
2025 2073 (lambda: doread(chain), b'read'),
2026 2074 ]
2027 2075
2028 2076 if getattr(r, '_withsparseread', False):
2029 2077 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2030 2078 benches.append(slicing)
2031 2079
2032 2080 benches.extend([
2033 2081 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2034 2082 (lambda: dodecompress(rawchunks), b'decompress'),
2035 2083 (lambda: dopatch(text, bins), b'patch'),
2036 2084 (lambda: dohash(text), b'hash'),
2037 2085 ])
2038 2086
2039 2087 timer, fm = gettimer(ui, opts)
2040 2088 for fn, title in benches:
2041 2089 timer(fn, title=title)
2042 2090 fm.end()
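# Illustrative invocation (sketch, mirroring the smoke test further down):
# break down the cost of reconstructing manifest revision 0:
#
#   $ hg perfrevlogrevision -m 0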
2043 2091
2044 2092 @command(b'perfrevset',
2045 2093 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2046 2094 (b'', b'contexts', False, b'obtain changectx for each revision')]
2047 2095 + formatteropts, b"REVSET")
2048 2096 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2049 2097 """benchmark the execution time of a revset
2050 2098
2051 2099 Use the --clear option if you need to evaluate the impact of building the
2052 2100 volatile revision set caches on revset execution. The volatile caches hold
2053 2101 filtering and obsolescence related data."""
2054 2102 opts = _byteskwargs(opts)
2055 2103
2056 2104 timer, fm = gettimer(ui, opts)
2057 2105 def d():
2058 2106 if clear:
2059 2107 repo.invalidatevolatilesets()
2060 2108 if contexts:
2061 2109 for ctx in repo.set(expr): pass
2062 2110 else:
2063 2111 for r in repo.revs(expr): pass
2064 2112 timer(d)
2065 2113 fm.end()
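# Illustrative invocation (sketch; the revset is arbitrary): evaluate all(),
# clearing the volatile caches and building a changectx for each revision:
#
#   $ hg perfrevset --clear --contexts 'all()'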
2066 2114
2067 2115 @command(b'perfvolatilesets',
2068 2116 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2069 2117 ] + formatteropts)
2070 2118 def perfvolatilesets(ui, repo, *names, **opts):
2071 2119 """benchmark the computation of various volatile sets
2072 2120
2073 2121 Volatile sets compute elements related to filtering and obsolescence."""
2074 2122 opts = _byteskwargs(opts)
2075 2123 timer, fm = gettimer(ui, opts)
2076 2124 repo = repo.unfiltered()
2077 2125
2078 2126 def getobs(name):
2079 2127 def d():
2080 2128 repo.invalidatevolatilesets()
2081 2129 if opts[b'clear_obsstore']:
2082 2130 clearfilecache(repo, b'obsstore')
2083 2131 obsolete.getrevs(repo, name)
2084 2132 return d
2085 2133
2086 2134 allobs = sorted(obsolete.cachefuncs)
2087 2135 if names:
2088 2136 allobs = [n for n in allobs if n in names]
2089 2137
2090 2138 for name in allobs:
2091 2139 timer(getobs(name), title=name)
2092 2140
2093 2141 def getfiltered(name):
2094 2142 def d():
2095 2143 repo.invalidatevolatilesets()
2096 2144 if opts[b'clear_obsstore']:
2097 2145 clearfilecache(repo, b'obsstore')
2098 2146 repoview.filterrevs(repo, name)
2099 2147 return d
2100 2148
2101 2149 allfilter = sorted(repoview.filtertable)
2102 2150 if names:
2103 2151 allfilter = [n for n in allfilter if n in names]
2104 2152
2105 2153 for name in allfilter:
2106 2154 timer(getfiltered(name), title=name)
2107 2155 fm.end()
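# Illustrative invocation (sketch): recompute every volatile set, dropping the
# obsstore between calls:
#
#   $ hg perfvolatilesets --clear-obsstore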
2108 2156
2109 2157 @command(b'perfbranchmap',
2110 2158 [(b'f', b'full', False,
2111 2159 b'include the build time of subsets'),
2112 2160 (b'', b'clear-revbranch', False,
2113 2161 b'purge the revbranch cache between computations'),
2114 2162 ] + formatteropts)
2115 2163 def perfbranchmap(ui, repo, *filternames, **opts):
2116 2164 """benchmark the update of a branchmap
2117 2165
2118 2166 This benchmarks the full repo.branchmap() call with read and write disabled.
2119 2167 """
2120 2168 opts = _byteskwargs(opts)
2121 2169 full = opts.get(b"full", False)
2122 2170 clear_revbranch = opts.get(b"clear_revbranch", False)
2123 2171 timer, fm = gettimer(ui, opts)
2124 2172 def getbranchmap(filtername):
2125 2173 """generate a benchmark function for the filtername"""
2126 2174 if filtername is None:
2127 2175 view = repo
2128 2176 else:
2129 2177 view = repo.filtered(filtername)
2130 2178 def d():
2131 2179 if clear_revbranch:
2132 2180 repo.revbranchcache()._clear()
2133 2181 if full:
2134 2182 view._branchcaches.clear()
2135 2183 else:
2136 2184 view._branchcaches.pop(filtername, None)
2137 2185 view.branchmap()
2138 2186 return d
2139 2187 # add filter in smaller subset to bigger subset
2140 2188 possiblefilters = set(repoview.filtertable)
2141 2189 if filternames:
2142 2190 possiblefilters &= set(filternames)
2143 2191 subsettable = getbranchmapsubsettable()
2144 2192 allfilters = []
2145 2193 while possiblefilters:
2146 2194 for name in possiblefilters:
2147 2195 subset = subsettable.get(name)
2148 2196 if subset not in possiblefilters:
2149 2197 break
2150 2198 else:
2151 2199 assert False, b'subset cycle %s!' % possiblefilters
2152 2200 allfilters.append(name)
2153 2201 possiblefilters.remove(name)
2154 2202
2155 2203 # warm the cache
2156 2204 if not full:
2157 2205 for name in allfilters:
2158 2206 repo.filtered(name).branchmap()
2159 2207 if not filternames or b'unfiltered' in filternames:
2160 2208 # add unfiltered
2161 2209 allfilters.append(None)
2162 2210
2163 2211 branchcacheread = safeattrsetter(branchmap, b'read')
2164 2212 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2165 2213 branchcacheread.set(lambda repo: None)
2166 2214 branchcachewrite.set(lambda bc, repo: None)
2167 2215 try:
2168 2216 for name in allfilters:
2169 2217 printname = name
2170 2218 if name is None:
2171 2219 printname = b'unfiltered'
2172 2220 timer(getbranchmap(name), title=str(printname))
2173 2221 finally:
2174 2222 branchcacheread.restore()
2175 2223 branchcachewrite.restore()
2176 2224 fm.end()
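# Illustrative invocation (sketch): include the build time of each subset and
# purge the revbranch cache between runs:
#
#   $ hg perfbranchmap --full --clear-revbranch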
2177 2225
2178 2226 @command(b'perfbranchmapload', [
2179 2227 (b'f', b'filter', b'', b'Specify repoview filter'),
2180 2228 (b'', b'list', False, b'List branchmap filter caches'),
2181 2229 ] + formatteropts)
2182 2230 def perfbranchmapread(ui, repo, filter=b'', list=False, **opts):
2183 2231 """benchmark reading the branchmap"""
2184 2232 opts = _byteskwargs(opts)
2185 2233
2186 2234 if list:
2187 2235 for name, kind, st in repo.cachevfs.readdir(stat=True):
2188 2236 if name.startswith(b'branch2'):
2189 2237 filtername = name.partition(b'-')[2] or b'unfiltered'
2190 2238 ui.status(b'%s - %s\n'
2191 2239 % (filtername, util.bytecount(st.st_size)))
2192 2240 return
2193 2241 if filter:
2194 2242 repo = repoview.repoview(repo, filter)
2195 2243 else:
2196 2244 repo = repo.unfiltered()
2197 2245 # try once without timer, the filter may not be cached
2198 2246 if branchmap.read(repo) is None:
2199 2247 raise error.Abort(b'No branchmap cached for %s repo'
2200 2248 % (filter or b'unfiltered'))
2201 2249 timer, fm = gettimer(ui, opts)
2202 2250 timer(lambda: branchmap.read(repo) and None)
2203 2251 fm.end()
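# Illustrative invocations (sketch; 'served' is just an example filter name):
# list the cached branchmaps, then time loading one of them:
#
#   $ hg perfbranchmapload --list
#   $ hg perfbranchmapload --filter served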
2204 2252
2205 2253 @command(b'perfloadmarkers')
2206 2254 def perfloadmarkers(ui, repo):
2207 2255 """benchmark the time to parse the on-disk markers for a repo
2208 2256
2209 2257 Result is the number of markers in the repo."""
2210 2258 timer, fm = gettimer(ui)
2211 2259 svfs = getsvfs(repo)
2212 2260 timer(lambda: len(obsolete.obsstore(svfs)))
2213 2261 fm.end()
2214 2262
2215 2263 @command(b'perflrucachedict', formatteropts +
2216 2264 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2217 2265 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2218 2266 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2219 2267 (b'', b'size', 4, b'size of cache'),
2220 2268 (b'', b'gets', 10000, b'number of key lookups'),
2221 2269 (b'', b'sets', 10000, b'number of key sets'),
2222 2270 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2223 2271 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2224 2272 norepo=True)
2225 2273 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2226 2274 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2227 2275 opts = _byteskwargs(opts)
2228 2276
2229 2277 def doinit():
2230 2278 for i in _xrange(10000):
2231 2279 util.lrucachedict(size)
2232 2280
2233 2281 costrange = list(range(mincost, maxcost + 1))
2234 2282
2235 2283 values = []
2236 2284 for i in _xrange(size):
2237 2285 values.append(random.randint(0, _maxint))
2238 2286
2239 2287 # Get mode fills the cache and tests raw lookup performance with no
2240 2288 # eviction.
2241 2289 getseq = []
2242 2290 for i in _xrange(gets):
2243 2291 getseq.append(random.choice(values))
2244 2292
2245 2293 def dogets():
2246 2294 d = util.lrucachedict(size)
2247 2295 for v in values:
2248 2296 d[v] = v
2249 2297 for key in getseq:
2250 2298 value = d[key]
2251 2299 value # silence pyflakes warning
2252 2300
2253 2301 def dogetscost():
2254 2302 d = util.lrucachedict(size, maxcost=costlimit)
2255 2303 for i, v in enumerate(values):
2256 2304 d.insert(v, v, cost=costs[i])
2257 2305 for key in getseq:
2258 2306 try:
2259 2307 value = d[key]
2260 2308 value # silence pyflakes warning
2261 2309 except KeyError:
2262 2310 pass
2263 2311
2264 2312 # Set mode tests insertion speed with cache eviction.
2265 2313 setseq = []
2266 2314 costs = []
2267 2315 for i in _xrange(sets):
2268 2316 setseq.append(random.randint(0, _maxint))
2269 2317 costs.append(random.choice(costrange))
2270 2318
2271 2319 def doinserts():
2272 2320 d = util.lrucachedict(size)
2273 2321 for v in setseq:
2274 2322 d.insert(v, v)
2275 2323
2276 2324 def doinsertscost():
2277 2325 d = util.lrucachedict(size, maxcost=costlimit)
2278 2326 for i, v in enumerate(setseq):
2279 2327 d.insert(v, v, cost=costs[i])
2280 2328
2281 2329 def dosets():
2282 2330 d = util.lrucachedict(size)
2283 2331 for v in setseq:
2284 2332 d[v] = v
2285 2333
2286 2334 # Mixed mode randomly performs gets and sets with eviction.
2287 2335 mixedops = []
2288 2336 for i in _xrange(mixed):
2289 2337 r = random.randint(0, 100)
2290 2338 if r < mixedgetfreq:
2291 2339 op = 0
2292 2340 else:
2293 2341 op = 1
2294 2342
2295 2343 mixedops.append((op,
2296 2344 random.randint(0, size * 2),
2297 2345 random.choice(costrange)))
2298 2346
2299 2347 def domixed():
2300 2348 d = util.lrucachedict(size)
2301 2349
2302 2350 for op, v, cost in mixedops:
2303 2351 if op == 0:
2304 2352 try:
2305 2353 d[v]
2306 2354 except KeyError:
2307 2355 pass
2308 2356 else:
2309 2357 d[v] = v
2310 2358
2311 2359 def domixedcost():
2312 2360 d = util.lrucachedict(size, maxcost=costlimit)
2313 2361
2314 2362 for op, v, cost in mixedops:
2315 2363 if op == 0:
2316 2364 try:
2317 2365 d[v]
2318 2366 except KeyError:
2319 2367 pass
2320 2368 else:
2321 2369 d.insert(v, v, cost=cost)
2322 2370
2323 2371 benches = [
2324 2372 (doinit, b'init'),
2325 2373 ]
2326 2374
2327 2375 if costlimit:
2328 2376 benches.extend([
2329 2377 (dogetscost, b'gets w/ cost limit'),
2330 2378 (doinsertscost, b'inserts w/ cost limit'),
2331 2379 (domixedcost, b'mixed w/ cost limit'),
2332 2380 ])
2333 2381 else:
2334 2382 benches.extend([
2335 2383 (dogets, b'gets'),
2336 2384 (doinserts, b'inserts'),
2337 2385 (dosets, b'sets'),
2338 2386 (domixed, b'mixed')
2339 2387 ])
2340 2388
2341 2389 for fn, title in benches:
2342 2390 timer, fm = gettimer(ui, opts)
2343 2391 timer(fn, title=title)
2344 2392 fm.end()
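# Illustrative invocation (sketch; sizes and counts are arbitrary): exercise a
# cost-limited cache; this command runs without a repository (norepo):
#
#   $ hg perflrucachedict --size 100 --costlimit 500 --gets 100000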
2345 2393
2346 2394 @command(b'perfwrite', formatteropts)
2347 2395 def perfwrite(ui, repo, **opts):
2348 2396 """microbenchmark ui.write
2349 2397 """
2350 2398 opts = _byteskwargs(opts)
2351 2399
2352 2400 timer, fm = gettimer(ui, opts)
2353 2401 def write():
2354 2402 for i in range(100000):
2355 2403 ui.write((b'Testing write performance\n'))
2356 2404 timer(write)
2357 2405 fm.end()
2358 2406
2359 2407 def uisetup(ui):
2360 2408 if (util.safehasattr(cmdutil, b'openrevlog') and
2361 2409 not util.safehasattr(commands, b'debugrevlogopts')):
2362 2410 # for "historical portability":
2363 2411 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2364 2412 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2365 2413 # openrevlog() should cause failure, because it has been
2366 2414 # available since 3.5 (or 49c583ca48c4).
2367 2415 def openrevlog(orig, repo, cmd, file_, opts):
2368 2416 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2369 2417 raise error.Abort(b"This version doesn't support --dir option",
2370 2418 hint=b"use 3.5 or later")
2371 2419 return orig(repo, cmd, file_, opts)
2372 2420 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
@@ -1,282 +1,285
1 1 #require test-repo
2 2
3 3 Set vars:
4 4
5 5 $ . "$TESTDIR/helpers-testrepo.sh"
6 6 $ CONTRIBDIR="$TESTDIR/../contrib"
7 7
8 8 Prepare repo:
9 9
10 10 $ hg init
11 11
12 12 $ echo this is file a > a
13 13 $ hg add a
14 14 $ hg commit -m first
15 15
16 16 $ echo adding to file a >> a
17 17 $ hg commit -m second
18 18
19 19 $ echo adding more to file a >> a
20 20 $ hg commit -m third
21 21
22 22 $ hg up -r 0
23 23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 24 $ echo merge-this >> a
25 25 $ hg commit -m merge-able
26 26 created new head
27 27
28 28 $ hg up -r 2
29 29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30 30
31 31 perfstatus
32 32
33 33 $ cat >> $HGRCPATH << EOF
34 34 > [extensions]
35 35 > perfstatusext=$CONTRIBDIR/perf.py
36 36 > [perf]
37 37 > presleep=0
38 38 > stub=on
39 39 > parentscount=1
40 40 > EOF
41 41 $ hg help perfstatusext
42 42 perfstatusext extension - helper extension to measure performance
43 43
44 44 list of commands:
45 45
46 46 perfaddremove
47 47 (no help text available)
48 48 perfancestors
49 49 (no help text available)
50 50 perfancestorset
51 51 (no help text available)
52 52 perfannotate (no help text available)
53 53 perfbdiff benchmark a bdiff between revisions
54 54 perfbookmarks
55 55 benchmark parsing bookmarks from disk to memory
56 56 perfbranchmap
57 57 benchmark the update of a branchmap
58 58 perfbranchmapload
59 59 benchmark reading the branchmap
60 60 perfbundleread
61 61 Benchmark reading of bundle files.
62 62 perfcca (no help text available)
63 63 perfchangegroupchangelog
64 64 Benchmark producing a changelog group for a changegroup.
65 65 perfchangeset
66 66 (no help text available)
67 67 perfctxfiles (no help text available)
68 68 perfdiffwd Profile diff of working directory changes
69 69 perfdirfoldmap
70 70 (no help text available)
71 71 perfdirs (no help text available)
72 72 perfdirstate (no help text available)
73 73 perfdirstatedirs
74 74 (no help text available)
75 75 perfdirstatefoldmap
76 76 (no help text available)
77 77 perfdirstatewrite
78 78 (no help text available)
79 79 perffncacheencode
80 80 (no help text available)
81 81 perffncacheload
82 82 (no help text available)
83 83 perffncachewrite
84 84 (no help text available)
85 85 perfheads (no help text available)
86 perfhelper-tracecopies
87 find statistic about potential parameters for the
88 'perftracecopies'
86 89 perfindex (no help text available)
87 90 perflinelogedits
88 91 (no help text available)
89 92 perfloadmarkers
90 93 benchmark the time to parse the on-disk markers for a repo
91 94 perflog (no help text available)
92 95 perflookup (no help text available)
93 96 perflrucachedict
94 97 (no help text available)
95 98 perfmanifest benchmark the time to read a manifest from disk and return a
96 99 usable
97 100 perfmergecalculate
98 101 (no help text available)
99 102 perfmoonwalk benchmark walking the changelog backwards
100 103 perfnodelookup
101 104 (no help text available)
102 105 perfparents (no help text available)
103 106 perfpathcopies
104 107 (no help text available)
105 108 perfphases benchmark phasesets computation
106 109 perfphasesremote
107 110 benchmark time needed to analyse phases of the remote server
108 111 perfrawfiles (no help text available)
109 112 perfrevlogchunks
110 113 Benchmark operations on revlog chunks.
111 114 perfrevlogindex
112 115 Benchmark operations against a revlog index.
113 116 perfrevlogrevision
114 117 Benchmark obtaining a revlog revision.
115 118 perfrevlogrevisions
116 119 Benchmark reading a series of revisions from a revlog.
117 120 perfrevlogwrite
118 121 Benchmark writing a series of revisions to a revlog.
119 122 perfrevrange (no help text available)
120 123 perfrevset benchmark the execution time of a revset
121 124 perfstartup (no help text available)
122 125 perfstatus (no help text available)
123 126 perftags (no help text available)
124 127 perftemplating
125 128 test the rendering time of a given template
126 129 perfunidiff benchmark a unified diff between revisions
127 130 perfvolatilesets
128 131 benchmark the computation of various volatile set
129 132 perfwalk (no help text available)
130 133 perfwrite microbenchmark ui.write
131 134
132 135 (use 'hg help -v perfstatusext' to show built-in aliases and global options)
133 136 $ hg perfaddremove
134 137 $ hg perfancestors
135 138 $ hg perfancestorset 2
136 139 $ hg perfannotate a
137 140 $ hg perfbdiff -c 1
138 141 $ hg perfbdiff --alldata 1
139 142 $ hg perfunidiff -c 1
140 143 $ hg perfunidiff --alldata 1
141 144 $ hg perfbookmarks
142 145 $ hg perfbranchmap
143 146 $ hg perfcca
144 147 $ hg perfchangegroupchangelog
145 148 $ hg perfchangeset 2
146 149 $ hg perfctxfiles 2
147 150 $ hg perfdiffwd
148 151 $ hg perfdirfoldmap
149 152 $ hg perfdirs
150 153 $ hg perfdirstate
151 154 $ hg perfdirstatedirs
152 155 $ hg perfdirstatefoldmap
153 156 $ hg perfdirstatewrite
154 157 #if repofncache
155 158 $ hg perffncacheencode
156 159 $ hg perffncacheload
157 160 $ hg debugrebuildfncache
158 161 fncache already up to date
159 162 $ hg perffncachewrite
160 163 $ hg debugrebuildfncache
161 164 fncache already up to date
162 165 #endif
163 166 $ hg perfheads
164 167 $ hg perfindex
165 168 $ hg perflinelogedits -n 1
166 169 $ hg perfloadmarkers
167 170 $ hg perflog
168 171 $ hg perflookup 2
169 172 $ hg perflrucache
170 173 $ hg perfmanifest 2
171 174 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
172 175 $ hg perfmanifest -m 44fe2c8352bb
173 176 abort: manifest revision must be integer or full node
174 177 [255]
175 178 $ hg perfmergecalculate -r 3
176 179 $ hg perfmoonwalk
177 180 $ hg perfnodelookup 2
178 181 $ hg perfpathcopies 1 2
179 182 $ hg perfrawfiles 2
180 183 $ hg perfrevlogindex -c
181 184 #if reporevlogstore
182 185 $ hg perfrevlogrevisions .hg/store/data/a.i
183 186 #endif
184 187 $ hg perfrevlogrevision -m 0
185 188 $ hg perfrevlogchunks -c
186 189 $ hg perfrevrange
187 190 $ hg perfrevset 'all()'
188 191 $ hg perfstartup
189 192 $ hg perfstatus
190 193 $ hg perftags
191 194 $ hg perftemplating
192 195 $ hg perfvolatilesets
193 196 $ hg perfwalk
194 197 $ hg perfparents
195 198
196 199 test actual output
197 200 ------------------
198 201
199 202 normal output:
200 203
201 204 $ hg perfheads --config perf.stub=no
202 205 ! wall * comb * user * sys * (best of *) (glob)
203 206
204 207 detailed output:
205 208
206 209 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
207 210 ! wall * comb * user * sys * (best of *) (glob)
208 211 ! wall * comb * user * sys * (max of *) (glob)
209 212 ! wall * comb * user * sys * (avg of *) (glob)
210 213 ! wall * comb * user * sys * (median of *) (glob)
211 214
212 215 test json output
213 216 ----------------
214 217
215 218 normal output:
216 219
217 220 $ hg perfheads --template json --config perf.stub=no
218 221 [
219 222 {
220 223 "comb": *, (glob)
221 224 "count": *, (glob)
222 225 "sys": *, (glob)
223 226 "user": *, (glob)
224 227 "wall": * (glob)
225 228 }
226 229 ]
227 230
228 231 detailed output:
229 232
230 233 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
231 234 [
232 235 {
233 236 "avg.comb": *, (glob)
234 237 "avg.count": *, (glob)
235 238 "avg.sys": *, (glob)
236 239 "avg.user": *, (glob)
237 240 "avg.wall": *, (glob)
238 241 "comb": *, (glob)
239 242 "count": *, (glob)
240 243 "max.comb": *, (glob)
241 244 "max.count": *, (glob)
242 245 "max.sys": *, (glob)
243 246 "max.user": *, (glob)
244 247 "max.wall": *, (glob)
245 248 "median.comb": *, (glob)
246 249 "median.count": *, (glob)
247 250 "median.sys": *, (glob)
248 251 "median.user": *, (glob)
249 252 "median.wall": *, (glob)
250 253 "sys": *, (glob)
251 254 "user": *, (glob)
252 255 "wall": * (glob)
253 256 }
254 257 ]
255 258
256 259 Check perf.py for historical portability
257 260 ----------------------------------------
258 261
259 262 $ cd "$TESTDIR/.."
260 263
261 264 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
262 265 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
263 266 > "$TESTDIR"/check-perf-code.py contrib/perf.py
264 267 contrib/perf.py:\d+: (re)
265 268 > from mercurial import (
266 269 import newer module separately in try clause for early Mercurial
267 270 contrib/perf.py:\d+: (re)
268 271 > from mercurial import (
269 272 import newer module separately in try clause for early Mercurial
270 273 contrib/perf.py:\d+: (re)
271 274 > origindexpath = orig.opener.join(orig.indexfile)
272 275 use getvfs()/getsvfs() for early Mercurial
273 276 contrib/perf.py:\d+: (re)
274 277 > origdatapath = orig.opener.join(orig.datafile)
275 278 use getvfs()/getsvfs() for early Mercurial
276 279 contrib/perf.py:\d+: (re)
277 280 > vfs = vfsmod.vfs(tmpdir)
278 281 use getvfs()/getsvfs() for early Mercurial
279 282 contrib/perf.py:\d+: (re)
280 283 > vfs.options = getattr(orig.opener, 'options', None)
281 284 use getvfs()/getsvfs() for early Mercurial
282 285 [1]