perf: extract the timing of a section in a context manager...
Boris Feld
r40179:acf560bc default
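This changeset factors the per-iteration timing bookkeeping out of _timer() into a small timeone() context manager, so a single section of code can be timed and a (wall, user, sys) sample collected without duplicating the os.times()/util.timer() calls. Below is a minimal, self-contained sketch of that pattern; it stands in time.perf_counter() for Mercurial's util.timer and uses illustrative names, while the actual helper is the timeone() function added in the diff that follows.

    import contextlib
    import os
    import time

    @contextlib.contextmanager
    def timeone():
        # Yield a list that receives one (wall, user, sys) sample once the
        # managed block exits.
        r = []
        ostart = os.times()
        cstart = time.perf_counter()
        yield r
        cstop = time.perf_counter()
        ostop = os.times()
        r.append((cstop - cstart, ostop[0] - ostart[0], ostop[1] - ostart[1]))

    # Usage: time one run of a section and keep the sample, mirroring how
    # _timer() consumes the context manager in the change below.
    results = []
    with timeone() as sample:
        sum(range(1000000))  # section being measured
    results.append(sample[0])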
@@ -1,2107 +1,2116 @@
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance'''
3 3
4 4 # "historical portability" policy of perf.py:
5 5 #
6 6 # We have to do:
7 7 # - make perf.py "loadable" with as wide Mercurial version as possible
8 8 # This doesn't mean that perf commands work correctly with that Mercurial.
9 9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
10 10 # - make historical perf command work correctly with as wide Mercurial
11 11 # version as possible
12 12 #
13 13 # We have to do, if possible with reasonable cost:
14 14 # - make recent perf command for historical feature work correctly
15 15 # with early Mercurial
16 16 #
17 17 # We don't have to do:
18 18 # - make perf command for recent feature work correctly with early
19 19 # Mercurial
20 20
21 21 from __future__ import absolute_import
22 import contextlib
22 23 import functools
23 24 import gc
24 25 import os
25 26 import random
26 27 import struct
27 28 import sys
28 29 import threading
29 30 import time
30 31 from mercurial import (
31 32 changegroup,
32 33 cmdutil,
33 34 commands,
34 35 copies,
35 36 error,
36 37 extensions,
37 38 mdiff,
38 39 merge,
39 40 revlog,
40 41 util,
41 42 )
42 43
43 44 # for "historical portability":
44 45 # try to import modules separately (in dict order), and ignore
45 46 # failure, because these aren't available with early Mercurial
46 47 try:
47 48 from mercurial import branchmap # since 2.5 (or bcee63733aad)
48 49 except ImportError:
49 50 pass
50 51 try:
51 52 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
52 53 except ImportError:
53 54 pass
54 55 try:
55 56 from mercurial import registrar # since 3.7 (or 37d50250b696)
56 57 dir(registrar) # forcibly load it
57 58 except ImportError:
58 59 registrar = None
59 60 try:
60 61 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
61 62 except ImportError:
62 63 pass
63 64 try:
64 65 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
65 66 except ImportError:
66 67 pass
67 68
68 69 def identity(a):
69 70 return a
70 71
71 72 try:
72 73 from mercurial import pycompat
73 74 getargspec = pycompat.getargspec # added to module after 4.5
74 75 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
75 76 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
76 77 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
77 78 if pycompat.ispy3:
78 79 _maxint = sys.maxsize # per py3 docs for replacing maxint
79 80 else:
80 81 _maxint = sys.maxint
81 82 except (ImportError, AttributeError):
82 83 import inspect
83 84 getargspec = inspect.getargspec
84 85 _byteskwargs = identity
85 86 _maxint = sys.maxint # no py3 support
86 87 _sysstr = lambda x: x # no py3 support
87 88 _xrange = xrange
88 89
89 90 try:
90 91 # 4.7+
91 92 queue = pycompat.queue.Queue
92 93 except (AttributeError, ImportError):
93 94 # <4.7.
94 95 try:
95 96 queue = pycompat.queue
96 97 except (AttributeError, ImportError):
97 98 queue = util.queue
98 99
99 100 try:
100 101 from mercurial import logcmdutil
101 102 makelogtemplater = logcmdutil.maketemplater
102 103 except (AttributeError, ImportError):
103 104 try:
104 105 makelogtemplater = cmdutil.makelogtemplater
105 106 except (AttributeError, ImportError):
106 107 makelogtemplater = None
107 108
108 109 # for "historical portability":
109 110 # define util.safehasattr forcibly, because util.safehasattr has been
110 111 # available since 1.9.3 (or 94b200a11cf7)
111 112 _undefined = object()
112 113 def safehasattr(thing, attr):
113 114 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
114 115 setattr(util, 'safehasattr', safehasattr)
115 116
116 117 # for "historical portability":
117 118 # define util.timer forcibly, because util.timer has been available
118 119 # since ae5d60bb70c9
119 120 if safehasattr(time, 'perf_counter'):
120 121 util.timer = time.perf_counter
121 122 elif os.name == b'nt':
122 123 util.timer = time.clock
123 124 else:
124 125 util.timer = time.time
125 126
126 127 # for "historical portability":
127 128 # use locally defined empty option list, if formatteropts isn't
128 129 # available, because commands.formatteropts has been available since
129 130 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
130 131 # available since 2.2 (or ae5f92e154d3)
131 132 formatteropts = getattr(cmdutil, "formatteropts",
132 133 getattr(commands, "formatteropts", []))
133 134
134 135 # for "historical portability":
135 136 # use locally defined option list, if debugrevlogopts isn't available,
136 137 # because commands.debugrevlogopts has been available since 3.7 (or
137 138 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
138 139 # since 1.9 (or a79fea6b3e77).
139 140 revlogopts = getattr(cmdutil, "debugrevlogopts",
140 141 getattr(commands, "debugrevlogopts", [
141 142 (b'c', b'changelog', False, (b'open changelog')),
142 143 (b'm', b'manifest', False, (b'open manifest')),
143 144 (b'', b'dir', False, (b'open directory manifest')),
144 145 ]))
145 146
146 147 cmdtable = {}
147 148
148 149 # for "historical portability":
149 150 # define parsealiases locally, because cmdutil.parsealiases has been
150 151 # available since 1.5 (or 6252852b4332)
151 152 def parsealiases(cmd):
152 153 return cmd.lstrip(b"^").split(b"|")
153 154
154 155 if safehasattr(registrar, 'command'):
155 156 command = registrar.command(cmdtable)
156 157 elif safehasattr(cmdutil, 'command'):
157 158 command = cmdutil.command(cmdtable)
158 159 if b'norepo' not in getargspec(command).args:
159 160 # for "historical portability":
160 161 # wrap original cmdutil.command, because "norepo" option has
161 162 # been available since 3.1 (or 75a96326cecb)
162 163 _command = command
163 164 def command(name, options=(), synopsis=None, norepo=False):
164 165 if norepo:
165 166 commands.norepo += b' %s' % b' '.join(parsealiases(name))
166 167 return _command(name, list(options), synopsis)
167 168 else:
168 169 # for "historical portability":
169 170 # define "@command" annotation locally, because cmdutil.command
170 171 # has been available since 1.9 (or 2daa5179e73f)
171 172 def command(name, options=(), synopsis=None, norepo=False):
172 173 def decorator(func):
173 174 if synopsis:
174 175 cmdtable[name] = func, list(options), synopsis
175 176 else:
176 177 cmdtable[name] = func, list(options)
177 178 if norepo:
178 179 commands.norepo += b' %s' % b' '.join(parsealiases(name))
179 180 return func
180 181 return decorator
181 182
182 183 try:
183 184 import mercurial.registrar
184 185 import mercurial.configitems
185 186 configtable = {}
186 187 configitem = mercurial.registrar.configitem(configtable)
187 188 configitem(b'perf', b'presleep',
188 189 default=mercurial.configitems.dynamicdefault,
189 190 )
190 191 configitem(b'perf', b'stub',
191 192 default=mercurial.configitems.dynamicdefault,
192 193 )
193 194 configitem(b'perf', b'parentscount',
194 195 default=mercurial.configitems.dynamicdefault,
195 196 )
196 197 configitem(b'perf', b'all-timing',
197 198 default=mercurial.configitems.dynamicdefault,
198 199 )
199 200 except (ImportError, AttributeError):
200 201 pass
201 202
202 203 def getlen(ui):
203 204 if ui.configbool(b"perf", b"stub", False):
204 205 return lambda x: 1
205 206 return len
206 207
207 208 def gettimer(ui, opts=None):
208 209 """return a timer function and formatter: (timer, formatter)
209 210
210 211 This function exists to gather the creation of formatter in a single
211 212 place instead of duplicating it in all performance commands."""
212 213
213 214 # enforce an idle period before execution to counteract power management
214 215 # experimental config: perf.presleep
215 216 time.sleep(getint(ui, b"perf", b"presleep", 1))
216 217
217 218 if opts is None:
218 219 opts = {}
219 220 # redirect all to stderr unless buffer api is in use
220 221 if not ui._buffers:
221 222 ui = ui.copy()
222 223 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
223 224 if uifout:
224 225 # for "historical portability":
225 226 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
226 227 uifout.set(ui.ferr)
227 228
228 229 # get a formatter
229 230 uiformatter = getattr(ui, 'formatter', None)
230 231 if uiformatter:
231 232 fm = uiformatter(b'perf', opts)
232 233 else:
233 234 # for "historical portability":
234 235 # define formatter locally, because ui.formatter has been
235 236 # available since 2.2 (or ae5f92e154d3)
236 237 from mercurial import node
237 238 class defaultformatter(object):
238 239 """Minimized composition of baseformatter and plainformatter
239 240 """
240 241 def __init__(self, ui, topic, opts):
241 242 self._ui = ui
242 243 if ui.debugflag:
243 244 self.hexfunc = node.hex
244 245 else:
245 246 self.hexfunc = node.short
246 247 def __nonzero__(self):
247 248 return False
248 249 __bool__ = __nonzero__
249 250 def startitem(self):
250 251 pass
251 252 def data(self, **data):
252 253 pass
253 254 def write(self, fields, deftext, *fielddata, **opts):
254 255 self._ui.write(deftext % fielddata, **opts)
255 256 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
256 257 if cond:
257 258 self._ui.write(deftext % fielddata, **opts)
258 259 def plain(self, text, **opts):
259 260 self._ui.write(text, **opts)
260 261 def end(self):
261 262 pass
262 263 fm = defaultformatter(ui, b'perf', opts)
263 264
264 265 # stub function, runs code only once instead of in a loop
265 266 # experimental config: perf.stub
266 267 if ui.configbool(b"perf", b"stub", False):
267 268 return functools.partial(stub_timer, fm), fm
268 269
269 270 # experimental config: perf.all-timing
270 271 displayall = ui.configbool(b"perf", b"all-timing", False)
271 272 return functools.partial(_timer, fm, displayall=displayall), fm
272 273
273 274 def stub_timer(fm, func, title=None):
274 275 func()
275 276
277 @contextlib.contextmanager
278 def timeone():
279 r = []
280 ostart = os.times()
281 cstart = util.timer()
282 yield r
283 cstop = util.timer()
284 ostop = os.times()
285 a, b = ostart, ostop
286 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
287
276 288 def _timer(fm, func, title=None, displayall=False):
277 289 gc.collect()
278 290 results = []
279 291 begin = util.timer()
280 292 count = 0
281 293 while True:
282 ostart = os.times()
283 cstart = util.timer()
284 r = func()
294 with timeone() as item:
295 r = func()
296 count += 1
297 results.append(item[0])
285 298 cstop = util.timer()
286 ostop = os.times()
287 count += 1
288 a, b = ostart, ostop
289 results.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
290 299 if cstop - begin > 3 and count >= 100:
291 300 break
292 301 if cstop - begin > 10 and count >= 3:
293 302 break
294 303
295 304 fm.startitem()
296 305
297 306 if title:
298 307 fm.write(b'title', b'! %s\n', title)
299 308 if r:
300 309 fm.write(b'result', b'! result: %s\n', r)
301 310 def display(role, entry):
302 311 prefix = b''
303 312 if role != b'best':
304 313 prefix = b'%s.' % role
305 314 fm.plain(b'!')
306 315 fm.write(prefix + b'wall', b' wall %f', entry[0])
307 316 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
308 317 fm.write(prefix + b'user', b' user %f', entry[1])
309 318 fm.write(prefix + b'sys', b' sys %f', entry[2])
310 319 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
311 320 fm.plain(b'\n')
312 321 results.sort()
313 322 min_val = results[0]
314 323 display(b'best', min_val)
315 324 if displayall:
316 325 max_val = results[-1]
317 326 display(b'max', max_val)
318 327 avg = tuple([sum(x) / count for x in zip(*results)])
319 328 display(b'avg', avg)
320 329 median = results[len(results) // 2]
321 330 display(b'median', median)
322 331
323 332 # utilities for historical portability
324 333
325 334 def getint(ui, section, name, default):
326 335 # for "historical portability":
327 336 # ui.configint has been available since 1.9 (or fa2b596db182)
328 337 v = ui.config(section, name, None)
329 338 if v is None:
330 339 return default
331 340 try:
332 341 return int(v)
333 342 except ValueError:
334 343 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
335 344 % (section, name, v))
336 345
337 346 def safeattrsetter(obj, name, ignoremissing=False):
338 347 """Ensure that 'obj' has 'name' attribute before subsequent setattr
339 348
340 349 This function is aborted, if 'obj' doesn't have 'name' attribute
341 350 at runtime. This avoids overlooking removal of an attribute, which
342 351 breaks assumption of performance measurement, in the future.
343 352
344 353 This function returns the object to (1) assign a new value, and
345 354 (2) restore an original value to the attribute.
346 355
347 356 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
348 357 abortion, and this function returns None. This is useful to
349 358 examine an attribute, which isn't ensured in all Mercurial
350 359 versions.
351 360 """
352 361 if not util.safehasattr(obj, name):
353 362 if ignoremissing:
354 363 return None
355 364 raise error.Abort((b"missing attribute %s of %s might break assumption"
356 365 b" of performance measurement") % (name, obj))
357 366
358 367 origvalue = getattr(obj, _sysstr(name))
359 368 class attrutil(object):
360 369 def set(self, newvalue):
361 370 setattr(obj, _sysstr(name), newvalue)
362 371 def restore(self):
363 372 setattr(obj, _sysstr(name), origvalue)
364 373
365 374 return attrutil()
366 375
367 376 # utilities to examine each internal API changes
368 377
369 378 def getbranchmapsubsettable():
370 379 # for "historical portability":
371 380 # subsettable is defined in:
372 381 # - branchmap since 2.9 (or 175c6fd8cacc)
373 382 # - repoview since 2.5 (or 59a9f18d4587)
374 383 for mod in (branchmap, repoview):
375 384 subsettable = getattr(mod, 'subsettable', None)
376 385 if subsettable:
377 386 return subsettable
378 387
379 388 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
380 389 # branchmap and repoview modules exist, but subsettable attribute
381 390 # doesn't)
382 391 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
383 392 hint=b"use 2.5 or later")
384 393
385 394 def getsvfs(repo):
386 395 """Return appropriate object to access files under .hg/store
387 396 """
388 397 # for "historical portability":
389 398 # repo.svfs has been available since 2.3 (or 7034365089bf)
390 399 svfs = getattr(repo, 'svfs', None)
391 400 if svfs:
392 401 return svfs
393 402 else:
394 403 return getattr(repo, 'sopener')
395 404
396 405 def getvfs(repo):
397 406 """Return appropriate object to access files under .hg
398 407 """
399 408 # for "historical portability":
400 409 # repo.vfs has been available since 2.3 (or 7034365089bf)
401 410 vfs = getattr(repo, 'vfs', None)
402 411 if vfs:
403 412 return vfs
404 413 else:
405 414 return getattr(repo, 'opener')
406 415
407 416 def repocleartagscachefunc(repo):
408 417 """Return the function to clear tags cache according to repo internal API
409 418 """
410 419 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
411 420 # in this case, setattr(repo, '_tagscache', None) or so isn't
412 421 # correct way to clear tags cache, because existing code paths
413 422 # expect _tagscache to be a structured object.
414 423 def clearcache():
415 424 # _tagscache has been filteredpropertycache since 2.5 (or
416 425 # 98c867ac1330), and delattr() can't work in such case
417 426 if b'_tagscache' in vars(repo):
418 427 del repo.__dict__[b'_tagscache']
419 428 return clearcache
420 429
421 430 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
422 431 if repotags: # since 1.4 (or 5614a628d173)
423 432 return lambda : repotags.set(None)
424 433
425 434 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
426 435 if repotagscache: # since 0.6 (or d7df759d0e97)
427 436 return lambda : repotagscache.set(None)
428 437
429 438 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
430 439 # this point, but it isn't so problematic, because:
431 440 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
432 441 # in perftags() causes failure soon
433 442 # - perf.py itself has been available since 1.1 (or eb240755386d)
434 443 raise error.Abort((b"tags API of this hg command is unknown"))
435 444
436 445 # utilities to clear cache
437 446
438 447 def clearfilecache(repo, attrname):
439 448 unfi = repo.unfiltered()
440 449 if attrname in vars(unfi):
441 450 delattr(unfi, attrname)
442 451 unfi._filecache.pop(attrname, None)
443 452
444 453 # perf commands
445 454
446 455 @command(b'perfwalk', formatteropts)
447 456 def perfwalk(ui, repo, *pats, **opts):
448 457 opts = _byteskwargs(opts)
449 458 timer, fm = gettimer(ui, opts)
450 459 m = scmutil.match(repo[None], pats, {})
451 460 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
452 461 ignored=False))))
453 462 fm.end()
454 463
455 464 @command(b'perfannotate', formatteropts)
456 465 def perfannotate(ui, repo, f, **opts):
457 466 opts = _byteskwargs(opts)
458 467 timer, fm = gettimer(ui, opts)
459 468 fc = repo[b'.'][f]
460 469 timer(lambda: len(fc.annotate(True)))
461 470 fm.end()
462 471
463 472 @command(b'perfstatus',
464 473 [(b'u', b'unknown', False,
465 474 b'ask status to look for unknown files')] + formatteropts)
466 475 def perfstatus(ui, repo, **opts):
467 476 opts = _byteskwargs(opts)
468 477 #m = match.always(repo.root, repo.getcwd())
469 478 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
470 479 # False))))
471 480 timer, fm = gettimer(ui, opts)
472 481 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
473 482 fm.end()
474 483
475 484 @command(b'perfaddremove', formatteropts)
476 485 def perfaddremove(ui, repo, **opts):
477 486 opts = _byteskwargs(opts)
478 487 timer, fm = gettimer(ui, opts)
479 488 try:
480 489 oldquiet = repo.ui.quiet
481 490 repo.ui.quiet = True
482 491 matcher = scmutil.match(repo[None])
483 492 opts[b'dry_run'] = True
484 493 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
485 494 finally:
486 495 repo.ui.quiet = oldquiet
487 496 fm.end()
488 497
489 498 def clearcaches(cl):
490 499 # behave somewhat consistently across internal API changes
491 500 if util.safehasattr(cl, b'clearcaches'):
492 501 cl.clearcaches()
493 502 elif util.safehasattr(cl, b'_nodecache'):
494 503 from mercurial.node import nullid, nullrev
495 504 cl._nodecache = {nullid: nullrev}
496 505 cl._nodepos = None
497 506
498 507 @command(b'perfheads', formatteropts)
499 508 def perfheads(ui, repo, **opts):
500 509 opts = _byteskwargs(opts)
501 510 timer, fm = gettimer(ui, opts)
502 511 cl = repo.changelog
503 512 def d():
504 513 len(cl.headrevs())
505 514 clearcaches(cl)
506 515 timer(d)
507 516 fm.end()
508 517
509 518 @command(b'perftags', formatteropts)
510 519 def perftags(ui, repo, **opts):
511 520 import mercurial.changelog
512 521 import mercurial.manifest
513 522
514 523 opts = _byteskwargs(opts)
515 524 timer, fm = gettimer(ui, opts)
516 525 svfs = getsvfs(repo)
517 526 repocleartagscache = repocleartagscachefunc(repo)
518 527 def t():
519 528 repo.changelog = mercurial.changelog.changelog(svfs)
520 529 rootmanifest = mercurial.manifest.manifestrevlog(svfs)
521 530 repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo,
522 531 rootmanifest)
523 532 repocleartagscache()
524 533 return len(repo.tags())
525 534 timer(t)
526 535 fm.end()
527 536
528 537 @command(b'perfancestors', formatteropts)
529 538 def perfancestors(ui, repo, **opts):
530 539 opts = _byteskwargs(opts)
531 540 timer, fm = gettimer(ui, opts)
532 541 heads = repo.changelog.headrevs()
533 542 def d():
534 543 for a in repo.changelog.ancestors(heads):
535 544 pass
536 545 timer(d)
537 546 fm.end()
538 547
539 548 @command(b'perfancestorset', formatteropts)
540 549 def perfancestorset(ui, repo, revset, **opts):
541 550 opts = _byteskwargs(opts)
542 551 timer, fm = gettimer(ui, opts)
543 552 revs = repo.revs(revset)
544 553 heads = repo.changelog.headrevs()
545 554 def d():
546 555 s = repo.changelog.ancestors(heads)
547 556 for rev in revs:
548 557 rev in s
549 558 timer(d)
550 559 fm.end()
551 560
552 561 @command(b'perfbookmarks', formatteropts)
553 562 def perfbookmarks(ui, repo, **opts):
554 563 """benchmark parsing bookmarks from disk to memory"""
555 564 opts = _byteskwargs(opts)
556 565 timer, fm = gettimer(ui, opts)
557 566 def d():
558 567 clearfilecache(repo, b'_bookmarks')
559 568 repo._bookmarks
560 569 timer(d)
561 570 fm.end()
562 571
563 572 @command(b'perfbundleread', formatteropts, b'BUNDLE')
564 573 def perfbundleread(ui, repo, bundlepath, **opts):
565 574 """Benchmark reading of bundle files.
566 575
567 576 This command is meant to isolate the I/O part of bundle reading as
568 577 much as possible.
569 578 """
570 579 from mercurial import (
571 580 bundle2,
572 581 exchange,
573 582 streamclone,
574 583 )
575 584
576 585 opts = _byteskwargs(opts)
577 586
578 587 def makebench(fn):
579 588 def run():
580 589 with open(bundlepath, b'rb') as fh:
581 590 bundle = exchange.readbundle(ui, fh, bundlepath)
582 591 fn(bundle)
583 592
584 593 return run
585 594
586 595 def makereadnbytes(size):
587 596 def run():
588 597 with open(bundlepath, b'rb') as fh:
589 598 bundle = exchange.readbundle(ui, fh, bundlepath)
590 599 while bundle.read(size):
591 600 pass
592 601
593 602 return run
594 603
595 604 def makestdioread(size):
596 605 def run():
597 606 with open(bundlepath, b'rb') as fh:
598 607 while fh.read(size):
599 608 pass
600 609
601 610 return run
602 611
603 612 # bundle1
604 613
605 614 def deltaiter(bundle):
606 615 for delta in bundle.deltaiter():
607 616 pass
608 617
609 618 def iterchunks(bundle):
610 619 for chunk in bundle.getchunks():
611 620 pass
612 621
613 622 # bundle2
614 623
615 624 def forwardchunks(bundle):
616 625 for chunk in bundle._forwardchunks():
617 626 pass
618 627
619 628 def iterparts(bundle):
620 629 for part in bundle.iterparts():
621 630 pass
622 631
623 632 def iterpartsseekable(bundle):
624 633 for part in bundle.iterparts(seekable=True):
625 634 pass
626 635
627 636 def seek(bundle):
628 637 for part in bundle.iterparts(seekable=True):
629 638 part.seek(0, os.SEEK_END)
630 639
631 640 def makepartreadnbytes(size):
632 641 def run():
633 642 with open(bundlepath, b'rb') as fh:
634 643 bundle = exchange.readbundle(ui, fh, bundlepath)
635 644 for part in bundle.iterparts():
636 645 while part.read(size):
637 646 pass
638 647
639 648 return run
640 649
641 650 benches = [
642 651 (makestdioread(8192), b'read(8k)'),
643 652 (makestdioread(16384), b'read(16k)'),
644 653 (makestdioread(32768), b'read(32k)'),
645 654 (makestdioread(131072), b'read(128k)'),
646 655 ]
647 656
648 657 with open(bundlepath, b'rb') as fh:
649 658 bundle = exchange.readbundle(ui, fh, bundlepath)
650 659
651 660 if isinstance(bundle, changegroup.cg1unpacker):
652 661 benches.extend([
653 662 (makebench(deltaiter), b'cg1 deltaiter()'),
654 663 (makebench(iterchunks), b'cg1 getchunks()'),
655 664 (makereadnbytes(8192), b'cg1 read(8k)'),
656 665 (makereadnbytes(16384), b'cg1 read(16k)'),
657 666 (makereadnbytes(32768), b'cg1 read(32k)'),
658 667 (makereadnbytes(131072), b'cg1 read(128k)'),
659 668 ])
660 669 elif isinstance(bundle, bundle2.unbundle20):
661 670 benches.extend([
662 671 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
663 672 (makebench(iterparts), b'bundle2 iterparts()'),
664 673 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
665 674 (makebench(seek), b'bundle2 part seek()'),
666 675 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
667 676 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
668 677 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
669 678 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
670 679 ])
671 680 elif isinstance(bundle, streamclone.streamcloneapplier):
672 681 raise error.Abort(b'stream clone bundles not supported')
673 682 else:
674 683 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
675 684
676 685 for fn, title in benches:
677 686 timer, fm = gettimer(ui, opts)
678 687 timer(fn, title=title)
679 688 fm.end()
680 689
681 690 @command(b'perfchangegroupchangelog', formatteropts +
682 691 [(b'', b'version', b'02', b'changegroup version'),
683 692 (b'r', b'rev', b'', b'revisions to add to changegroup')])
684 693 def perfchangegroupchangelog(ui, repo, version=b'02', rev=None, **opts):
685 694 """Benchmark producing a changelog group for a changegroup.
686 695
687 696 This measures the time spent processing the changelog during a
688 697 bundle operation. This occurs during `hg bundle` and on a server
689 698 processing a `getbundle` wire protocol request (handles clones
690 699 and pull requests).
691 700
692 701 By default, all revisions are added to the changegroup.
693 702 """
694 703 opts = _byteskwargs(opts)
695 704 cl = repo.changelog
696 705 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
697 706 bundler = changegroup.getbundler(version, repo)
698 707
699 708 def d():
700 709 state, chunks = bundler._generatechangelog(cl, nodes)
701 710 for chunk in chunks:
702 711 pass
703 712
704 713 timer, fm = gettimer(ui, opts)
705 714
706 715 # Terminal printing can interfere with timing. So disable it.
707 716 with ui.configoverride({(b'progress', b'disable'): True}):
708 717 timer(d)
709 718
710 719 fm.end()
711 720
712 721 @command(b'perfdirs', formatteropts)
713 722 def perfdirs(ui, repo, **opts):
714 723 opts = _byteskwargs(opts)
715 724 timer, fm = gettimer(ui, opts)
716 725 dirstate = repo.dirstate
717 726 b'a' in dirstate
718 727 def d():
719 728 dirstate.hasdir(b'a')
720 729 del dirstate._map._dirs
721 730 timer(d)
722 731 fm.end()
723 732
724 733 @command(b'perfdirstate', formatteropts)
725 734 def perfdirstate(ui, repo, **opts):
726 735 opts = _byteskwargs(opts)
727 736 timer, fm = gettimer(ui, opts)
728 737 b"a" in repo.dirstate
729 738 def d():
730 739 repo.dirstate.invalidate()
731 740 b"a" in repo.dirstate
732 741 timer(d)
733 742 fm.end()
734 743
735 744 @command(b'perfdirstatedirs', formatteropts)
736 745 def perfdirstatedirs(ui, repo, **opts):
737 746 opts = _byteskwargs(opts)
738 747 timer, fm = gettimer(ui, opts)
739 748 b"a" in repo.dirstate
740 749 def d():
741 750 repo.dirstate.hasdir(b"a")
742 751 del repo.dirstate._map._dirs
743 752 timer(d)
744 753 fm.end()
745 754
746 755 @command(b'perfdirstatefoldmap', formatteropts)
747 756 def perfdirstatefoldmap(ui, repo, **opts):
748 757 opts = _byteskwargs(opts)
749 758 timer, fm = gettimer(ui, opts)
750 759 dirstate = repo.dirstate
751 760 b'a' in dirstate
752 761 def d():
753 762 dirstate._map.filefoldmap.get(b'a')
754 763 del dirstate._map.filefoldmap
755 764 timer(d)
756 765 fm.end()
757 766
758 767 @command(b'perfdirfoldmap', formatteropts)
759 768 def perfdirfoldmap(ui, repo, **opts):
760 769 opts = _byteskwargs(opts)
761 770 timer, fm = gettimer(ui, opts)
762 771 dirstate = repo.dirstate
763 772 b'a' in dirstate
764 773 def d():
765 774 dirstate._map.dirfoldmap.get(b'a')
766 775 del dirstate._map.dirfoldmap
767 776 del dirstate._map._dirs
768 777 timer(d)
769 778 fm.end()
770 779
771 780 @command(b'perfdirstatewrite', formatteropts)
772 781 def perfdirstatewrite(ui, repo, **opts):
773 782 opts = _byteskwargs(opts)
774 783 timer, fm = gettimer(ui, opts)
775 784 ds = repo.dirstate
776 785 b"a" in ds
777 786 def d():
778 787 ds._dirty = True
779 788 ds.write(repo.currenttransaction())
780 789 timer(d)
781 790 fm.end()
782 791
783 792 @command(b'perfmergecalculate',
784 793 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
785 794 def perfmergecalculate(ui, repo, rev, **opts):
786 795 opts = _byteskwargs(opts)
787 796 timer, fm = gettimer(ui, opts)
788 797 wctx = repo[None]
789 798 rctx = scmutil.revsingle(repo, rev, rev)
790 799 ancestor = wctx.ancestor(rctx)
791 800 # we don't want working dir files to be stat'd in the benchmark, so prime
792 801 # that cache
793 802 wctx.dirty()
794 803 def d():
795 804 # acceptremote is True because we don't want prompts in the middle of
796 805 # our benchmark
797 806 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
798 807 acceptremote=True, followcopies=True)
799 808 timer(d)
800 809 fm.end()
801 810
802 811 @command(b'perfpathcopies', [], b"REV REV")
803 812 def perfpathcopies(ui, repo, rev1, rev2, **opts):
804 813 opts = _byteskwargs(opts)
805 814 timer, fm = gettimer(ui, opts)
806 815 ctx1 = scmutil.revsingle(repo, rev1, rev1)
807 816 ctx2 = scmutil.revsingle(repo, rev2, rev2)
808 817 def d():
809 818 copies.pathcopies(ctx1, ctx2)
810 819 timer(d)
811 820 fm.end()
812 821
813 822 @command(b'perfphases',
814 823 [(b'', b'full', False, b'include file reading time too'),
815 824 ], b"")
816 825 def perfphases(ui, repo, **opts):
817 826 """benchmark phasesets computation"""
818 827 opts = _byteskwargs(opts)
819 828 timer, fm = gettimer(ui, opts)
820 829 _phases = repo._phasecache
821 830 full = opts.get(b'full')
822 831 def d():
823 832 phases = _phases
824 833 if full:
825 834 clearfilecache(repo, b'_phasecache')
826 835 phases = repo._phasecache
827 836 phases.invalidate()
828 837 phases.loadphaserevs(repo)
829 838 timer(d)
830 839 fm.end()
831 840
832 841 @command(b'perfphasesremote',
833 842 [], b"[DEST]")
834 843 def perfphasesremote(ui, repo, dest=None, **opts):
835 844 """benchmark time needed to analyse phases of the remote server"""
836 845 from mercurial.node import (
837 846 bin,
838 847 )
839 848 from mercurial import (
840 849 exchange,
841 850 hg,
842 851 phases,
843 852 )
844 853 opts = _byteskwargs(opts)
845 854 timer, fm = gettimer(ui, opts)
846 855
847 856 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
848 857 if not path:
849 858 raise error.Abort((b'default repository not configured!'),
850 859 hint=(b"see 'hg help config.paths'"))
851 860 dest = path.pushloc or path.loc
852 861 branches = (path.branch, opts.get(b'branch') or [])
853 862 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
854 863 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
855 864 other = hg.peer(repo, opts, dest)
856 865
857 866 # easier to perform discovery through the operation
858 867 op = exchange.pushoperation(repo, other)
859 868 exchange._pushdiscoverychangeset(op)
860 869
861 870 remotesubset = op.fallbackheads
862 871
863 872 with other.commandexecutor() as e:
864 873 remotephases = e.callcommand(b'listkeys',
865 874 {b'namespace': b'phases'}).result()
866 875 del other
867 876 publishing = remotephases.get(b'publishing', False)
868 877 if publishing:
869 878 ui.status((b'publishing: yes\n'))
870 879 else:
871 880 ui.status((b'publishing: no\n'))
872 881
873 882 nodemap = repo.changelog.nodemap
874 883 nonpublishroots = 0
875 884 for nhex, phase in remotephases.iteritems():
876 885 if nhex == b'publishing': # ignore data related to publish option
877 886 continue
878 887 node = bin(nhex)
879 888 if node in nodemap and int(phase):
880 889 nonpublishroots += 1
881 890 ui.status((b'number of roots: %d\n') % len(remotephases))
882 891 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
883 892 def d():
884 893 phases.remotephasessummary(repo,
885 894 remotesubset,
886 895 remotephases)
887 896 timer(d)
888 897 fm.end()
889 898
890 899 @command(b'perfmanifest',[
891 900 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
892 901 (b'', b'clear-disk', False, b'clear on-disk caches too'),
893 902 ] + formatteropts, b'REV|NODE')
894 903 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
895 904 """benchmark the time to read a manifest from disk and return a usable
896 905 dict-like object
897 906
898 907 Manifest caches are cleared before retrieval."""
899 908 opts = _byteskwargs(opts)
900 909 timer, fm = gettimer(ui, opts)
901 910 if not manifest_rev:
902 911 ctx = scmutil.revsingle(repo, rev, rev)
903 912 t = ctx.manifestnode()
904 913 else:
905 914 from mercurial.node import bin
906 915
907 916 if len(rev) == 40:
908 917 t = bin(rev)
909 918 else:
910 919 try:
911 920 rev = int(rev)
912 921
913 922 if util.safehasattr(repo.manifestlog, b'getstorage'):
914 923 t = repo.manifestlog.getstorage(b'').node(rev)
915 924 else:
916 925 t = repo.manifestlog._revlog.lookup(rev)
917 926 except ValueError:
918 927 raise error.Abort(b'manifest revision must be integer or full '
919 928 b'node')
920 929 def d():
921 930 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
922 931 repo.manifestlog[t].read()
923 932 timer(d)
924 933 fm.end()
925 934
926 935 @command(b'perfchangeset', formatteropts)
927 936 def perfchangeset(ui, repo, rev, **opts):
928 937 opts = _byteskwargs(opts)
929 938 timer, fm = gettimer(ui, opts)
930 939 n = scmutil.revsingle(repo, rev).node()
931 940 def d():
932 941 repo.changelog.read(n)
933 942 #repo.changelog._cache = None
934 943 timer(d)
935 944 fm.end()
936 945
937 946 @command(b'perfindex', formatteropts)
938 947 def perfindex(ui, repo, **opts):
939 948 import mercurial.revlog
940 949 opts = _byteskwargs(opts)
941 950 timer, fm = gettimer(ui, opts)
942 951 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
943 952 n = repo[b"tip"].node()
944 953 svfs = getsvfs(repo)
945 954 def d():
946 955 cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
947 956 cl.rev(n)
948 957 timer(d)
949 958 fm.end()
950 959
951 960 @command(b'perfstartup', formatteropts)
952 961 def perfstartup(ui, repo, **opts):
953 962 opts = _byteskwargs(opts)
954 963 timer, fm = gettimer(ui, opts)
955 964 cmd = sys.argv[0]
956 965 def d():
957 966 if os.name != r'nt':
958 967 os.system(b"HGRCPATH= %s version -q > /dev/null" % cmd)
959 968 else:
960 969 os.environ[r'HGRCPATH'] = r' '
961 970 os.system(r"%s version -q > NUL" % cmd)
962 971 timer(d)
963 972 fm.end()
964 973
965 974 @command(b'perfparents', formatteropts)
966 975 def perfparents(ui, repo, **opts):
967 976 opts = _byteskwargs(opts)
968 977 timer, fm = gettimer(ui, opts)
969 978 # control the number of commits perfparents iterates over
970 979 # experimental config: perf.parentscount
971 980 count = getint(ui, b"perf", b"parentscount", 1000)
972 981 if len(repo.changelog) < count:
973 982 raise error.Abort(b"repo needs %d commits for this test" % count)
974 983 repo = repo.unfiltered()
975 984 nl = [repo.changelog.node(i) for i in _xrange(count)]
976 985 def d():
977 986 for n in nl:
978 987 repo.changelog.parents(n)
979 988 timer(d)
980 989 fm.end()
981 990
982 991 @command(b'perfctxfiles', formatteropts)
983 992 def perfctxfiles(ui, repo, x, **opts):
984 993 opts = _byteskwargs(opts)
985 994 x = int(x)
986 995 timer, fm = gettimer(ui, opts)
987 996 def d():
988 997 len(repo[x].files())
989 998 timer(d)
990 999 fm.end()
991 1000
992 1001 @command(b'perfrawfiles', formatteropts)
993 1002 def perfrawfiles(ui, repo, x, **opts):
994 1003 opts = _byteskwargs(opts)
995 1004 x = int(x)
996 1005 timer, fm = gettimer(ui, opts)
997 1006 cl = repo.changelog
998 1007 def d():
999 1008 len(cl.read(x)[3])
1000 1009 timer(d)
1001 1010 fm.end()
1002 1011
1003 1012 @command(b'perflookup', formatteropts)
1004 1013 def perflookup(ui, repo, rev, **opts):
1005 1014 opts = _byteskwargs(opts)
1006 1015 timer, fm = gettimer(ui, opts)
1007 1016 timer(lambda: len(repo.lookup(rev)))
1008 1017 fm.end()
1009 1018
1010 1019 @command(b'perflinelogedits',
1011 1020 [(b'n', b'edits', 10000, b'number of edits'),
1012 1021 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1013 1022 ], norepo=True)
1014 1023 def perflinelogedits(ui, **opts):
1015 1024 from mercurial import linelog
1016 1025
1017 1026 opts = _byteskwargs(opts)
1018 1027
1019 1028 edits = opts[b'edits']
1020 1029 maxhunklines = opts[b'max_hunk_lines']
1021 1030
1022 1031 maxb1 = 100000
1023 1032 random.seed(0)
1024 1033 randint = random.randint
1025 1034 currentlines = 0
1026 1035 arglist = []
1027 1036 for rev in _xrange(edits):
1028 1037 a1 = randint(0, currentlines)
1029 1038 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1030 1039 b1 = randint(0, maxb1)
1031 1040 b2 = randint(b1, b1 + maxhunklines)
1032 1041 currentlines += (b2 - b1) - (a2 - a1)
1033 1042 arglist.append((rev, a1, a2, b1, b2))
1034 1043
1035 1044 def d():
1036 1045 ll = linelog.linelog()
1037 1046 for args in arglist:
1038 1047 ll.replacelines(*args)
1039 1048
1040 1049 timer, fm = gettimer(ui, opts)
1041 1050 timer(d)
1042 1051 fm.end()
1043 1052
1044 1053 @command(b'perfrevrange', formatteropts)
1045 1054 def perfrevrange(ui, repo, *specs, **opts):
1046 1055 opts = _byteskwargs(opts)
1047 1056 timer, fm = gettimer(ui, opts)
1048 1057 revrange = scmutil.revrange
1049 1058 timer(lambda: len(revrange(repo, specs)))
1050 1059 fm.end()
1051 1060
1052 1061 @command(b'perfnodelookup', formatteropts)
1053 1062 def perfnodelookup(ui, repo, rev, **opts):
1054 1063 opts = _byteskwargs(opts)
1055 1064 timer, fm = gettimer(ui, opts)
1056 1065 import mercurial.revlog
1057 1066 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1058 1067 n = scmutil.revsingle(repo, rev).node()
1059 1068 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1060 1069 def d():
1061 1070 cl.rev(n)
1062 1071 clearcaches(cl)
1063 1072 timer(d)
1064 1073 fm.end()
1065 1074
1066 1075 @command(b'perflog',
1067 1076 [(b'', b'rename', False, b'ask log to follow renames')
1068 1077 ] + formatteropts)
1069 1078 def perflog(ui, repo, rev=None, **opts):
1070 1079 opts = _byteskwargs(opts)
1071 1080 if rev is None:
1072 1081 rev=[]
1073 1082 timer, fm = gettimer(ui, opts)
1074 1083 ui.pushbuffer()
1075 1084 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1076 1085 copies=opts.get(b'rename')))
1077 1086 ui.popbuffer()
1078 1087 fm.end()
1079 1088
1080 1089 @command(b'perfmoonwalk', formatteropts)
1081 1090 def perfmoonwalk(ui, repo, **opts):
1082 1091 """benchmark walking the changelog backwards
1083 1092
1084 1093 This also loads the changelog data for each revision in the changelog.
1085 1094 """
1086 1095 opts = _byteskwargs(opts)
1087 1096 timer, fm = gettimer(ui, opts)
1088 1097 def moonwalk():
1089 1098 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1090 1099 ctx = repo[i]
1091 1100 ctx.branch() # read changelog data (in addition to the index)
1092 1101 timer(moonwalk)
1093 1102 fm.end()
1094 1103
1095 1104 @command(b'perftemplating',
1096 1105 [(b'r', b'rev', [], b'revisions to run the template on'),
1097 1106 ] + formatteropts)
1098 1107 def perftemplating(ui, repo, testedtemplate=None, **opts):
1099 1108 """test the rendering time of a given template"""
1100 1109 if makelogtemplater is None:
1101 1110 raise error.Abort((b"perftemplating not available with this Mercurial"),
1102 1111 hint=b"use 4.3 or later")
1103 1112
1104 1113 opts = _byteskwargs(opts)
1105 1114
1106 1115 nullui = ui.copy()
1107 1116 nullui.fout = open(os.devnull, r'wb')
1108 1117 nullui.disablepager()
1109 1118 revs = opts.get(b'rev')
1110 1119 if not revs:
1111 1120 revs = [b'all()']
1112 1121 revs = list(scmutil.revrange(repo, revs))
1113 1122
1114 1123 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1115 1124 b' {author|person}: {desc|firstline}\n')
1116 1125 if testedtemplate is None:
1117 1126 testedtemplate = defaulttemplate
1118 1127 displayer = makelogtemplater(nullui, repo, testedtemplate)
1119 1128 def format():
1120 1129 for r in revs:
1121 1130 ctx = repo[r]
1122 1131 displayer.show(ctx)
1123 1132 displayer.flush(ctx)
1124 1133
1125 1134 timer, fm = gettimer(ui, opts)
1126 1135 timer(format)
1127 1136 fm.end()
1128 1137
1129 1138 @command(b'perfcca', formatteropts)
1130 1139 def perfcca(ui, repo, **opts):
1131 1140 opts = _byteskwargs(opts)
1132 1141 timer, fm = gettimer(ui, opts)
1133 1142 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1134 1143 fm.end()
1135 1144
1136 1145 @command(b'perffncacheload', formatteropts)
1137 1146 def perffncacheload(ui, repo, **opts):
1138 1147 opts = _byteskwargs(opts)
1139 1148 timer, fm = gettimer(ui, opts)
1140 1149 s = repo.store
1141 1150 def d():
1142 1151 s.fncache._load()
1143 1152 timer(d)
1144 1153 fm.end()
1145 1154
1146 1155 @command(b'perffncachewrite', formatteropts)
1147 1156 def perffncachewrite(ui, repo, **opts):
1148 1157 opts = _byteskwargs(opts)
1149 1158 timer, fm = gettimer(ui, opts)
1150 1159 s = repo.store
1151 1160 lock = repo.lock()
1152 1161 s.fncache._load()
1153 1162 tr = repo.transaction(b'perffncachewrite')
1154 1163 tr.addbackup(b'fncache')
1155 1164 def d():
1156 1165 s.fncache._dirty = True
1157 1166 s.fncache.write(tr)
1158 1167 timer(d)
1159 1168 tr.close()
1160 1169 lock.release()
1161 1170 fm.end()
1162 1171
1163 1172 @command(b'perffncacheencode', formatteropts)
1164 1173 def perffncacheencode(ui, repo, **opts):
1165 1174 opts = _byteskwargs(opts)
1166 1175 timer, fm = gettimer(ui, opts)
1167 1176 s = repo.store
1168 1177 s.fncache._load()
1169 1178 def d():
1170 1179 for p in s.fncache.entries:
1171 1180 s.encode(p)
1172 1181 timer(d)
1173 1182 fm.end()
1174 1183
1175 1184 def _bdiffworker(q, blocks, xdiff, ready, done):
1176 1185 while not done.is_set():
1177 1186 pair = q.get()
1178 1187 while pair is not None:
1179 1188 if xdiff:
1180 1189 mdiff.bdiff.xdiffblocks(*pair)
1181 1190 elif blocks:
1182 1191 mdiff.bdiff.blocks(*pair)
1183 1192 else:
1184 1193 mdiff.textdiff(*pair)
1185 1194 q.task_done()
1186 1195 pair = q.get()
1187 1196 q.task_done() # for the None one
1188 1197 with ready:
1189 1198 ready.wait()
1190 1199
1191 1200 def _manifestrevision(repo, mnode):
1192 1201 ml = repo.manifestlog
1193 1202
1194 1203 if util.safehasattr(ml, b'getstorage'):
1195 1204 store = ml.getstorage(b'')
1196 1205 else:
1197 1206 store = ml._revlog
1198 1207
1199 1208 return store.revision(mnode)
1200 1209
1201 1210 @command(b'perfbdiff', revlogopts + formatteropts + [
1202 1211 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1203 1212 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1204 1213 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
1205 1214 (b'', b'blocks', False, b'test computing diffs into blocks'),
1206 1215 (b'', b'xdiff', False, b'use xdiff algorithm'),
1207 1216 ],
1208 1217
1209 1218 b'-c|-m|FILE REV')
1210 1219 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1211 1220 """benchmark a bdiff between revisions
1212 1221
1213 1222 By default, benchmark a bdiff between its delta parent and itself.
1214 1223
1215 1224 With ``--count``, benchmark bdiffs between delta parents and self for N
1216 1225 revisions starting at the specified revision.
1217 1226
1218 1227 With ``--alldata``, assume the requested revision is a changeset and
1219 1228 measure bdiffs for all changes related to that changeset (manifest
1220 1229 and filelogs).
1221 1230 """
1222 1231 opts = _byteskwargs(opts)
1223 1232
1224 1233 if opts[b'xdiff'] and not opts[b'blocks']:
1225 1234 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1226 1235
1227 1236 if opts[b'alldata']:
1228 1237 opts[b'changelog'] = True
1229 1238
1230 1239 if opts.get(b'changelog') or opts.get(b'manifest'):
1231 1240 file_, rev = None, file_
1232 1241 elif rev is None:
1233 1242 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1234 1243
1235 1244 blocks = opts[b'blocks']
1236 1245 xdiff = opts[b'xdiff']
1237 1246 textpairs = []
1238 1247
1239 1248 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1240 1249
1241 1250 startrev = r.rev(r.lookup(rev))
1242 1251 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1243 1252 if opts[b'alldata']:
1244 1253 # Load revisions associated with changeset.
1245 1254 ctx = repo[rev]
1246 1255 mtext = _manifestrevision(repo, ctx.manifestnode())
1247 1256 for pctx in ctx.parents():
1248 1257 pman = _manifestrevision(repo, pctx.manifestnode())
1249 1258 textpairs.append((pman, mtext))
1250 1259
1251 1260 # Load filelog revisions by iterating manifest delta.
1252 1261 man = ctx.manifest()
1253 1262 pman = ctx.p1().manifest()
1254 1263 for filename, change in pman.diff(man).items():
1255 1264 fctx = repo.file(filename)
1256 1265 f1 = fctx.revision(change[0][0] or -1)
1257 1266 f2 = fctx.revision(change[1][0] or -1)
1258 1267 textpairs.append((f1, f2))
1259 1268 else:
1260 1269 dp = r.deltaparent(rev)
1261 1270 textpairs.append((r.revision(dp), r.revision(rev)))
1262 1271
1263 1272 withthreads = threads > 0
1264 1273 if not withthreads:
1265 1274 def d():
1266 1275 for pair in textpairs:
1267 1276 if xdiff:
1268 1277 mdiff.bdiff.xdiffblocks(*pair)
1269 1278 elif blocks:
1270 1279 mdiff.bdiff.blocks(*pair)
1271 1280 else:
1272 1281 mdiff.textdiff(*pair)
1273 1282 else:
1274 1283 q = queue()
1275 1284 for i in _xrange(threads):
1276 1285 q.put(None)
1277 1286 ready = threading.Condition()
1278 1287 done = threading.Event()
1279 1288 for i in _xrange(threads):
1280 1289 threading.Thread(target=_bdiffworker,
1281 1290 args=(q, blocks, xdiff, ready, done)).start()
1282 1291 q.join()
1283 1292 def d():
1284 1293 for pair in textpairs:
1285 1294 q.put(pair)
1286 1295 for i in _xrange(threads):
1287 1296 q.put(None)
1288 1297 with ready:
1289 1298 ready.notify_all()
1290 1299 q.join()
1291 1300 timer, fm = gettimer(ui, opts)
1292 1301 timer(d)
1293 1302 fm.end()
1294 1303
1295 1304 if withthreads:
1296 1305 done.set()
1297 1306 for i in _xrange(threads):
1298 1307 q.put(None)
1299 1308 with ready:
1300 1309 ready.notify_all()
1301 1310
1302 1311 @command(b'perfunidiff', revlogopts + formatteropts + [
1303 1312 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1304 1313 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1305 1314 ], b'-c|-m|FILE REV')
1306 1315 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1307 1316 """benchmark a unified diff between revisions
1308 1317
1309 1318 This doesn't include any copy tracing - it's just a unified diff
1310 1319 of the texts.
1311 1320
1312 1321 By default, benchmark a diff between its delta parent and itself.
1313 1322
1314 1323 With ``--count``, benchmark diffs between delta parents and self for N
1315 1324 revisions starting at the specified revision.
1316 1325
1317 1326 With ``--alldata``, assume the requested revision is a changeset and
1318 1327 measure diffs for all changes related to that changeset (manifest
1319 1328 and filelogs).
1320 1329 """
1321 1330 opts = _byteskwargs(opts)
1322 1331 if opts[b'alldata']:
1323 1332 opts[b'changelog'] = True
1324 1333
1325 1334 if opts.get(b'changelog') or opts.get(b'manifest'):
1326 1335 file_, rev = None, file_
1327 1336 elif rev is None:
1328 1337 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1329 1338
1330 1339 textpairs = []
1331 1340
1332 1341 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1333 1342
1334 1343 startrev = r.rev(r.lookup(rev))
1335 1344 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1336 1345 if opts[b'alldata']:
1337 1346 # Load revisions associated with changeset.
1338 1347 ctx = repo[rev]
1339 1348 mtext = _manifestrevision(repo, ctx.manifestnode())
1340 1349 for pctx in ctx.parents():
1341 1350 pman = _manifestrevision(repo, pctx.manifestnode())
1342 1351 textpairs.append((pman, mtext))
1343 1352
1344 1353 # Load filelog revisions by iterating manifest delta.
1345 1354 man = ctx.manifest()
1346 1355 pman = ctx.p1().manifest()
1347 1356 for filename, change in pman.diff(man).items():
1348 1357 fctx = repo.file(filename)
1349 1358 f1 = fctx.revision(change[0][0] or -1)
1350 1359 f2 = fctx.revision(change[1][0] or -1)
1351 1360 textpairs.append((f1, f2))
1352 1361 else:
1353 1362 dp = r.deltaparent(rev)
1354 1363 textpairs.append((r.revision(dp), r.revision(rev)))
1355 1364
1356 1365 def d():
1357 1366 for left, right in textpairs:
1358 1367 # The date strings don't matter, so we pass empty strings.
1359 1368 headerlines, hunks = mdiff.unidiff(
1360 1369 left, b'', right, b'', b'left', b'right', binary=False)
1361 1370 # consume iterators in roughly the way patch.py does
1362 1371 b'\n'.join(headerlines)
1363 1372 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1364 1373 timer, fm = gettimer(ui, opts)
1365 1374 timer(d)
1366 1375 fm.end()
1367 1376
1368 1377 @command(b'perfdiffwd', formatteropts)
1369 1378 def perfdiffwd(ui, repo, **opts):
1370 1379 """Profile diff of working directory changes"""
1371 1380 opts = _byteskwargs(opts)
1372 1381 timer, fm = gettimer(ui, opts)
1373 1382 options = {
1374 1383 b'w': b'ignore_all_space',
1375 1384 b'b': b'ignore_space_change',
1376 1385 b'B': b'ignore_blank_lines',
1377 1386 }
1378 1387
1379 1388 for diffopt in (b'', b'w', b'b', b'B', b'wB'):
1380 1389 opts = dict((options[c], b'1') for c in diffopt)
1381 1390 def d():
1382 1391 ui.pushbuffer()
1383 1392 commands.diff(ui, repo, **opts)
1384 1393 ui.popbuffer()
1385 1394 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1386 1395 timer(d, title)
1387 1396 fm.end()
1388 1397
1389 1398 @command(b'perfrevlogindex', revlogopts + formatteropts,
1390 1399 b'-c|-m|FILE')
1391 1400 def perfrevlogindex(ui, repo, file_=None, **opts):
1392 1401 """Benchmark operations against a revlog index.
1393 1402
1394 1403 This tests constructing a revlog instance, reading index data,
1395 1404 parsing index data, and performing various operations related to
1396 1405 index data.
1397 1406 """
1398 1407
1399 1408 opts = _byteskwargs(opts)
1400 1409
1401 1410 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1402 1411
1403 1412 opener = getattr(rl, 'opener') # trick linter
1404 1413 indexfile = rl.indexfile
1405 1414 data = opener.read(indexfile)
1406 1415
1407 1416 header = struct.unpack(b'>I', data[0:4])[0]
1408 1417 version = header & 0xFFFF
1409 1418 if version == 1:
1410 1419 revlogio = revlog.revlogio()
1411 1420 inline = header & (1 << 16)
1412 1421 else:
1413 1422 raise error.Abort((b'unsupported revlog version: %d') % version)
1414 1423
1415 1424 rllen = len(rl)
1416 1425
1417 1426 node0 = rl.node(0)
1418 1427 node25 = rl.node(rllen // 4)
1419 1428 node50 = rl.node(rllen // 2)
1420 1429 node75 = rl.node(rllen // 4 * 3)
1421 1430 node100 = rl.node(rllen - 1)
1422 1431
1423 1432 allrevs = range(rllen)
1424 1433 allrevsrev = list(reversed(allrevs))
1425 1434 allnodes = [rl.node(rev) for rev in range(rllen)]
1426 1435 allnodesrev = list(reversed(allnodes))
1427 1436
1428 1437 def constructor():
1429 1438 revlog.revlog(opener, indexfile)
1430 1439
1431 1440 def read():
1432 1441 with opener(indexfile) as fh:
1433 1442 fh.read()
1434 1443
1435 1444 def parseindex():
1436 1445 revlogio.parseindex(data, inline)
1437 1446
1438 1447 def getentry(revornode):
1439 1448 index = revlogio.parseindex(data, inline)[0]
1440 1449 index[revornode]
1441 1450
1442 1451 def getentries(revs, count=1):
1443 1452 index = revlogio.parseindex(data, inline)[0]
1444 1453
1445 1454 for i in range(count):
1446 1455 for rev in revs:
1447 1456 index[rev]
1448 1457
1449 1458 def resolvenode(node):
1450 1459 nodemap = revlogio.parseindex(data, inline)[1]
1451 1460 # This only works for the C code.
1452 1461 if nodemap is None:
1453 1462 return
1454 1463
1455 1464 try:
1456 1465 nodemap[node]
1457 1466 except error.RevlogError:
1458 1467 pass
1459 1468
1460 1469 def resolvenodes(nodes, count=1):
1461 1470 nodemap = revlogio.parseindex(data, inline)[1]
1462 1471 if nodemap is None:
1463 1472 return
1464 1473
1465 1474 for i in range(count):
1466 1475 for node in nodes:
1467 1476 try:
1468 1477 nodemap[node]
1469 1478 except error.RevlogError:
1470 1479 pass
1471 1480
1472 1481 benches = [
1473 1482 (constructor, b'revlog constructor'),
1474 1483 (read, b'read'),
1475 1484 (parseindex, b'create index object'),
1476 1485 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1477 1486 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1478 1487 (lambda: resolvenode(node0), b'look up node at rev 0'),
1479 1488 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1480 1489 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1481 1490 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1482 1491 (lambda: resolvenode(node100), b'look up node at tip'),
1483 1492 # 2x variation is to measure caching impact.
1484 1493 (lambda: resolvenodes(allnodes),
1485 1494 b'look up all nodes (forward)'),
1486 1495 (lambda: resolvenodes(allnodes, 2),
1487 1496 b'look up all nodes 2x (forward)'),
1488 1497 (lambda: resolvenodes(allnodesrev),
1489 1498 b'look up all nodes (reverse)'),
1490 1499 (lambda: resolvenodes(allnodesrev, 2),
1491 1500 b'look up all nodes 2x (reverse)'),
1492 1501 (lambda: getentries(allrevs),
1493 1502 b'retrieve all index entries (forward)'),
1494 1503 (lambda: getentries(allrevs, 2),
1495 1504 b'retrieve all index entries 2x (forward)'),
1496 1505 (lambda: getentries(allrevsrev),
1497 1506 b'retrieve all index entries (reverse)'),
1498 1507 (lambda: getentries(allrevsrev, 2),
1499 1508 b'retrieve all index entries 2x (reverse)'),
1500 1509 ]
1501 1510
1502 1511 for fn, title in benches:
1503 1512 timer, fm = gettimer(ui, opts)
1504 1513 timer(fn, title=title)
1505 1514 fm.end()
1506 1515
1507 1516 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1508 1517 [(b'd', b'dist', 100, b'distance between the revisions'),
1509 1518 (b's', b'startrev', 0, b'revision to start reading at'),
1510 1519 (b'', b'reverse', False, b'read in reverse')],
1511 1520 b'-c|-m|FILE')
1512 1521 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1513 1522 **opts):
1514 1523 """Benchmark reading a series of revisions from a revlog.
1515 1524
1516 1525 By default, we read every ``-d/--dist`` revision from 0 to tip of
1517 1526 the specified revlog.
1518 1527
1519 1528 The start revision can be defined via ``-s/--startrev``.
1520 1529 """
1521 1530 opts = _byteskwargs(opts)
1522 1531
1523 1532 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1524 1533 rllen = getlen(ui)(rl)
1525 1534
1526 1535 if startrev < 0:
1527 1536 startrev = rllen + startrev
1528 1537
1529 1538 def d():
1530 1539 rl.clearcaches()
1531 1540
1532 1541 beginrev = startrev
1533 1542 endrev = rllen
1534 1543 dist = opts[b'dist']
1535 1544
1536 1545 if reverse:
1537 1546 beginrev, endrev = endrev, beginrev
1538 1547 dist = -1 * dist
1539 1548
1540 1549 for x in _xrange(beginrev, endrev, dist):
1541 1550 # Old revisions don't support passing int.
1542 1551 n = rl.node(x)
1543 1552 rl.revision(n)
1544 1553
1545 1554 timer, fm = gettimer(ui, opts)
1546 1555 timer(d)
1547 1556 fm.end()
1548 1557
1549 1558 @command(b'perfrevlogchunks', revlogopts + formatteropts +
1550 1559 [(b'e', b'engines', b'', b'compression engines to use'),
1551 1560 (b's', b'startrev', 0, b'revision to start at')],
1552 1561 b'-c|-m|FILE')
1553 1562 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
1554 1563 """Benchmark operations on revlog chunks.
1555 1564
1556 1565 Logically, each revlog is a collection of fulltext revisions. However,
1557 1566 stored within each revlog are "chunks" of possibly compressed data. This
1558 1567 data needs to be read and decompressed or compressed and written.
1559 1568
1560 1569 This command measures the time it takes to read+decompress and recompress
1561 1570 chunks in a revlog. It effectively isolates I/O and compression performance.
1562 1571 For measurements of higher-level operations like resolving revisions,
1563 1572 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
1564 1573 """
1565 1574 opts = _byteskwargs(opts)
1566 1575
1567 1576 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
1568 1577
1569 1578 # _chunkraw was renamed to _getsegmentforrevs.
1570 1579 try:
1571 1580 segmentforrevs = rl._getsegmentforrevs
1572 1581 except AttributeError:
1573 1582 segmentforrevs = rl._chunkraw
1574 1583
1575 1584 # Verify engines argument.
1576 1585 if engines:
1577 1586 engines = set(e.strip() for e in engines.split(b','))
1578 1587 for engine in engines:
1579 1588 try:
1580 1589 util.compressionengines[engine]
1581 1590 except KeyError:
1582 1591 raise error.Abort(b'unknown compression engine: %s' % engine)
1583 1592 else:
1584 1593 engines = []
1585 1594 for e in util.compengines:
1586 1595 engine = util.compengines[e]
1587 1596 try:
1588 1597 if engine.available():
1589 1598 engine.revlogcompressor().compress(b'dummy')
1590 1599 engines.append(e)
1591 1600 except NotImplementedError:
1592 1601 pass
1593 1602
1594 1603 revs = list(rl.revs(startrev, len(rl) - 1))
1595 1604
1596 1605 def rlfh(rl):
1597 1606 if rl._inline:
1598 1607 return getsvfs(repo)(rl.indexfile)
1599 1608 else:
1600 1609 return getsvfs(repo)(rl.datafile)
1601 1610
1602 1611 def doread():
1603 1612 rl.clearcaches()
1604 1613 for rev in revs:
1605 1614 segmentforrevs(rev, rev)
1606 1615
1607 1616 def doreadcachedfh():
1608 1617 rl.clearcaches()
1609 1618 fh = rlfh(rl)
1610 1619 for rev in revs:
1611 1620 segmentforrevs(rev, rev, df=fh)
1612 1621
1613 1622 def doreadbatch():
1614 1623 rl.clearcaches()
1615 1624 segmentforrevs(revs[0], revs[-1])
1616 1625
1617 1626 def doreadbatchcachedfh():
1618 1627 rl.clearcaches()
1619 1628 fh = rlfh(rl)
1620 1629 segmentforrevs(revs[0], revs[-1], df=fh)
1621 1630
1622 1631 def dochunk():
1623 1632 rl.clearcaches()
1624 1633 fh = rlfh(rl)
1625 1634 for rev in revs:
1626 1635 rl._chunk(rev, df=fh)
1627 1636
1628 1637 chunks = [None]
1629 1638
1630 1639 def dochunkbatch():
1631 1640 rl.clearcaches()
1632 1641 fh = rlfh(rl)
1633 1642 # Save chunks as a side-effect.
1634 1643 chunks[0] = rl._chunks(revs, df=fh)
1635 1644
1636 1645 def docompress(compressor):
1637 1646 rl.clearcaches()
1638 1647
1639 1648 try:
1640 1649 # Swap in the requested compression engine.
1641 1650 oldcompressor = rl._compressor
1642 1651 rl._compressor = compressor
1643 1652 for chunk in chunks[0]:
1644 1653 rl.compress(chunk)
1645 1654 finally:
1646 1655 rl._compressor = oldcompressor
1647 1656
1648 1657 benches = [
1649 1658 (lambda: doread(), b'read'),
1650 1659 (lambda: doreadcachedfh(), b'read w/ reused fd'),
1651 1660 (lambda: doreadbatch(), b'read batch'),
1652 1661 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
1653 1662 (lambda: dochunk(), b'chunk'),
1654 1663 (lambda: dochunkbatch(), b'chunk batch'),
1655 1664 ]
1656 1665
1657 1666 for engine in sorted(engines):
1658 1667 compressor = util.compengines[engine].revlogcompressor()
1659 1668 benches.append((functools.partial(docompress, compressor),
1660 1669 b'compress w/ %s' % engine))
1661 1670
1662 1671 for fn, title in benches:
1663 1672 timer, fm = gettimer(ui, opts)
1664 1673 timer(fn, title=title)
1665 1674 fm.end()
1666 1675
1667 1676 @command(b'perfrevlogrevision', revlogopts + formatteropts +
1668 1677 [(b'', b'cache', False, b'use caches instead of clearing')],
1669 1678 b'-c|-m|FILE REV')
1670 1679 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
1671 1680 """Benchmark obtaining a revlog revision.
1672 1681
1673 1682 Obtaining a revlog revision consists of roughly the following steps:
1674 1683
1675 1684 1. Compute the delta chain
1676 1685 2. Obtain the raw chunks for that delta chain
1677 1686 3. Decompress each raw chunk
1678 1687 4. Apply binary patches to obtain fulltext
1679 1688 5. Verify hash of fulltext
1680 1689
1681 1690 This command measures the time spent in each of these phases.
1682 1691 """
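    # Example usage (assuming the perf extension is enabled; the revision
    # number is illustrative):
    #
    #   $ hg perfrevlogrevision -m 1000
    #
    # reports one timing per phase (full, deltachain, read, rawchunks,
    # decompress, patch, hash) for manifest revision 1000.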
1683 1692 opts = _byteskwargs(opts)
1684 1693
1685 1694 if opts.get(b'changelog') or opts.get(b'manifest'):
1686 1695 file_, rev = None, file_
1687 1696 elif rev is None:
1688 1697 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
1689 1698
1690 1699 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
1691 1700
1692 1701 # _chunkraw was renamed to _getsegmentforrevs.
1693 1702 try:
1694 1703 segmentforrevs = r._getsegmentforrevs
1695 1704 except AttributeError:
1696 1705 segmentforrevs = r._chunkraw
1697 1706
1698 1707 node = r.lookup(rev)
1699 1708 rev = r.rev(node)
1700 1709
1701 1710 def getrawchunks(data, chain):
1702 1711 start = r.start
1703 1712 length = r.length
1704 1713 inline = r._inline
1705 1714 iosize = r._io.size
1706 1715 buffer = util.buffer
1707 1716 offset = start(chain[0])
1708 1717
1709 1718 chunks = []
1710 1719 ladd = chunks.append
1711 1720
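        # In an inline revlog the data is interleaved with the index, so the
        # chunk for ``rev`` is preceded by (rev + 1) index entries of
        # ``iosize`` bytes each; shift the offset accordingly when slicing
        # the already-read segment.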
1712 1721 for rev in chain:
1713 1722 chunkstart = start(rev)
1714 1723 if inline:
1715 1724 chunkstart += (rev + 1) * iosize
1716 1725 chunklength = length(rev)
1717 1726 ladd(buffer(data, chunkstart - offset, chunklength))
1718 1727
1719 1728 return chunks
1720 1729
1721 1730 def dodeltachain(rev):
1722 1731 if not cache:
1723 1732 r.clearcaches()
1724 1733 r._deltachain(rev)
1725 1734
1726 1735 def doread(chain):
1727 1736 if not cache:
1728 1737 r.clearcaches()
1729 1738 segmentforrevs(chain[0], chain[-1])
1730 1739
1731 1740 def dorawchunks(data, chain):
1732 1741 if not cache:
1733 1742 r.clearcaches()
1734 1743 getrawchunks(data, chain)
1735 1744
1736 1745 def dodecompress(chunks):
1737 1746 decomp = r.decompress
1738 1747 for chunk in chunks:
1739 1748 decomp(chunk)
1740 1749
1741 1750 def dopatch(text, bins):
1742 1751 if not cache:
1743 1752 r.clearcaches()
1744 1753 mdiff.patches(text, bins)
1745 1754
1746 1755 def dohash(text):
1747 1756 if not cache:
1748 1757 r.clearcaches()
1749 1758 r.checkhash(text, node, rev=rev)
1750 1759
1751 1760 def dorevision():
1752 1761 if not cache:
1753 1762 r.clearcaches()
1754 1763 r.revision(node)
1755 1764
1756 1765 chain = r._deltachain(rev)[0]
1757 1766 data = segmentforrevs(chain[0], chain[-1])[1]
1758 1767 rawchunks = getrawchunks(data, chain)
1759 1768 bins = r._chunks(chain)
1760 1769 text = str(bins[0])
1761 1770 bins = bins[1:]
1762 1771 text = mdiff.patches(text, bins)
1763 1772
1764 1773 benches = [
1765 1774 (lambda: dorevision(), b'full'),
1766 1775 (lambda: dodeltachain(rev), b'deltachain'),
1767 1776 (lambda: doread(chain), b'read'),
1768 1777 (lambda: dorawchunks(data, chain), b'rawchunks'),
1769 1778 (lambda: dodecompress(rawchunks), b'decompress'),
1770 1779 (lambda: dopatch(text, bins), b'patch'),
1771 1780 (lambda: dohash(text), b'hash'),
1772 1781 ]
1773 1782
1774 1783 for fn, title in benches:
1775 1784 timer, fm = gettimer(ui, opts)
1776 1785 timer(fn, title=title)
1777 1786 fm.end()
1778 1787
1779 1788 @command(b'perfrevset',
1780 1789 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
1781 1790 (b'', b'contexts', False, b'obtain changectx for each revision')]
1782 1791 + formatteropts, b"REVSET")
1783 1792 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
1784 1793 """benchmark the execution time of a revset
1785 1794
1786 1795     Use the --clear option if you need to evaluate the impact of rebuilding the
1787 1796     volatile revision set caches on revset execution. The volatile caches hold
1788 1797     filtered and obsolescence-related data."""
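    # Example usage (assuming the perf extension is enabled; the revset is
    # illustrative):
    #
    #   $ hg perfrevset --clear 'all()'
    #
    # times evaluating ``all()`` with the volatile caches dropped before each
    # run; add --contexts to include changectx creation in the measurement.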
1789 1798 opts = _byteskwargs(opts)
1790 1799
1791 1800 timer, fm = gettimer(ui, opts)
1792 1801 def d():
1793 1802 if clear:
1794 1803 repo.invalidatevolatilesets()
1795 1804 if contexts:
1796 1805 for ctx in repo.set(expr): pass
1797 1806 else:
1798 1807 for r in repo.revs(expr): pass
1799 1808 timer(d)
1800 1809 fm.end()
1801 1810
1802 1811 @command(b'perfvolatilesets',
1803 1812 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
1804 1813 ] + formatteropts)
1805 1814 def perfvolatilesets(ui, repo, *names, **opts):
1806 1815     """benchmark the computation of various volatile sets
1807 1816 
1808 1817     Volatile sets compute elements related to filtering and obsolescence."""
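    # Example usage (assuming the perf extension is enabled; set names come
    # from obsolete.cachefuncs and repoview.filtertable):
    #
    #   $ hg perfvolatilesets obsolete
    #
    # restricts the benchmark to the "obsolete" set instead of timing them all.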
1809 1818 opts = _byteskwargs(opts)
1810 1819 timer, fm = gettimer(ui, opts)
1811 1820 repo = repo.unfiltered()
1812 1821
1813 1822 def getobs(name):
1814 1823 def d():
1815 1824 repo.invalidatevolatilesets()
1816 1825 if opts[b'clear_obsstore']:
1817 1826 clearfilecache(repo, b'obsstore')
1818 1827 obsolete.getrevs(repo, name)
1819 1828 return d
1820 1829
1821 1830 allobs = sorted(obsolete.cachefuncs)
1822 1831 if names:
1823 1832 allobs = [n for n in allobs if n in names]
1824 1833
1825 1834 for name in allobs:
1826 1835 timer(getobs(name), title=name)
1827 1836
1828 1837 def getfiltered(name):
1829 1838 def d():
1830 1839 repo.invalidatevolatilesets()
1831 1840 if opts[b'clear_obsstore']:
1832 1841 clearfilecache(repo, b'obsstore')
1833 1842 repoview.filterrevs(repo, name)
1834 1843 return d
1835 1844
1836 1845 allfilter = sorted(repoview.filtertable)
1837 1846 if names:
1838 1847 allfilter = [n for n in allfilter if n in names]
1839 1848
1840 1849 for name in allfilter:
1841 1850 timer(getfiltered(name), title=name)
1842 1851 fm.end()
1843 1852
1844 1853 @command(b'perfbranchmap',
1845 1854 [(b'f', b'full', False,
1846 1855       b'include the build time of subsets'),
1847 1856     (b'', b'clear-revbranch', False,
1848 1857       b'purge the revbranch cache between each computation'),
1849 1858 ] + formatteropts)
1850 1859 def perfbranchmap(ui, repo, *filternames, **opts):
1851 1860 """benchmark the update of a branchmap
1852 1861
1853 1862     This benchmarks the full repo.branchmap() call with branchmap cache reads and writes disabled.
1854 1863 """
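    # Example usage (assuming the perf extension is enabled; filter names are
    # the repoview filters, e.g. "visible" or "served"):
    #
    #   $ hg perfbranchmap --full visible served
    #
    # rebuilds the branchmaps for the "visible" and "served" views from
    # scratch instead of updating a pre-warmed cache.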
1855 1864 opts = _byteskwargs(opts)
1856 1865 full = opts.get(b"full", False)
1857 1866 clear_revbranch = opts.get(b"clear_revbranch", False)
1858 1867 timer, fm = gettimer(ui, opts)
1859 1868 def getbranchmap(filtername):
1860 1869 """generate a benchmark function for the filtername"""
1861 1870 if filtername is None:
1862 1871 view = repo
1863 1872 else:
1864 1873 view = repo.filtered(filtername)
1865 1874 def d():
1866 1875 if clear_revbranch:
1867 1876 repo.revbranchcache()._clear()
1868 1877 if full:
1869 1878 view._branchcaches.clear()
1870 1879 else:
1871 1880 view._branchcaches.pop(filtername, None)
1872 1881 view.branchmap()
1873 1882 return d
1874 1883     # order the filters from the smallest subset to the biggest subset
1875 1884 possiblefilters = set(repoview.filtertable)
1876 1885 if filternames:
1877 1886 possiblefilters &= set(filternames)
1878 1887 subsettable = getbranchmapsubsettable()
1879 1888 allfilters = []
1880 1889 while possiblefilters:
1881 1890 for name in possiblefilters:
1882 1891 subset = subsettable.get(name)
1883 1892 if subset not in possiblefilters:
1884 1893 break
1885 1894 else:
1886 1895 assert False, b'subset cycle %s!' % possiblefilters
1887 1896 allfilters.append(name)
1888 1897 possiblefilters.remove(name)
1889 1898
1890 1899 # warm the cache
1891 1900 if not full:
1892 1901 for name in allfilters:
1893 1902 repo.filtered(name).branchmap()
1894 1903 if not filternames or b'unfiltered' in filternames:
1895 1904 # add unfiltered
1896 1905 allfilters.append(None)
1897 1906
1898 1907 branchcacheread = safeattrsetter(branchmap, b'read')
1899 1908 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
1900 1909 branchcacheread.set(lambda repo: None)
1901 1910 branchcachewrite.set(lambda bc, repo: None)
1902 1911 try:
1903 1912 for name in allfilters:
1904 1913 printname = name
1905 1914 if name is None:
1906 1915 printname = b'unfiltered'
1907 1916 timer(getbranchmap(name), title=str(printname))
1908 1917 finally:
1909 1918 branchcacheread.restore()
1910 1919 branchcachewrite.restore()
1911 1920 fm.end()
1912 1921
1913 1922 @command(b'perfbranchmapload', [
1914 1923 (b'f', b'filter', b'', b'Specify repoview filter'),
1915 1924     (b'', b'list', False, b'list branchmap filter caches'),
1916 1925 ] + formatteropts)
1917 1926 def perfbranchmapread(ui, repo, filter=b'', list=False, **opts):
1918 1927 """benchmark reading the branchmap"""
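    # Example usage (assuming the perf extension is enabled):
    #
    #   $ hg perfbranchmapload --list
    #
    # lists the on-disk branchmap caches and their sizes; use
    # --filter FILTER to time reading the cache of a specific repoview.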
1919 1928 opts = _byteskwargs(opts)
1920 1929
1921 1930 if list:
1922 1931 for name, kind, st in repo.cachevfs.readdir(stat=True):
1923 1932 if name.startswith(b'branch2'):
1924 1933 filtername = name.partition(b'-')[2] or b'unfiltered'
1925 1934 ui.status(b'%s - %s\n'
1926 1935 % (filtername, util.bytecount(st.st_size)))
1927 1936 return
1928 1937 if filter:
1929 1938 repo = repoview.repoview(repo, filter)
1930 1939 else:
1931 1940 repo = repo.unfiltered()
1932 1941     # try once without the timer; the filter may not be cached yet
1933 1942 if branchmap.read(repo) is None:
1934 1943         raise error.Abort(b'No branchmap cached for %s repo'
1935 1944 % (filter or b'unfiltered'))
1936 1945 timer, fm = gettimer(ui, opts)
1937 1946 timer(lambda: branchmap.read(repo) and None)
1938 1947 fm.end()
1939 1948
1940 1949 @command(b'perfloadmarkers')
1941 1950 def perfloadmarkers(ui, repo):
1942 1951 """benchmark the time to parse the on-disk markers for a repo
1943 1952
1944 1953 Result is the number of markers in the repo."""
1945 1954 timer, fm = gettimer(ui)
1946 1955 svfs = getsvfs(repo)
1947 1956 timer(lambda: len(obsolete.obsstore(svfs)))
1948 1957 fm.end()
1949 1958
1950 1959 @command(b'perflrucachedict', formatteropts +
1951 1960 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
1952 1961 (b'', b'mincost', 0, b'smallest cost of items in cache'),
1953 1962 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
1954 1963 (b'', b'size', 4, b'size of cache'),
1955 1964 (b'', b'gets', 10000, b'number of key lookups'),
1956 1965 (b'', b'sets', 10000, b'number of key sets'),
1957 1966 (b'', b'mixed', 10000, b'number of mixed mode operations'),
1958 1967 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
1959 1968 norepo=True)
1960 1969 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
1961 1970 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
1962 1971 opts = _byteskwargs(opts)
1963 1972
1964 1973 def doinit():
1965 1974 for i in _xrange(10000):
1966 1975 util.lrucachedict(size)
1967 1976
1968 1977 costrange = list(range(mincost, maxcost + 1))
1969 1978
1970 1979 values = []
1971 1980 for i in _xrange(size):
1972 1981 values.append(random.randint(0, _maxint))
1973 1982
1974 1983 # Get mode fills the cache and tests raw lookup performance with no
1975 1984 # eviction.
1976 1985 getseq = []
1977 1986 for i in _xrange(gets):
1978 1987 getseq.append(random.choice(values))
1979 1988
1980 1989 def dogets():
1981 1990 d = util.lrucachedict(size)
1982 1991 for v in values:
1983 1992 d[v] = v
1984 1993 for key in getseq:
1985 1994 value = d[key]
1986 1995 value # silence pyflakes warning
1987 1996
1988 1997 def dogetscost():
1989 1998 d = util.lrucachedict(size, maxcost=costlimit)
1990 1999 for i, v in enumerate(values):
1991 2000 d.insert(v, v, cost=costs[i])
1992 2001 for key in getseq:
1993 2002 try:
1994 2003 value = d[key]
1995 2004 value # silence pyflakes warning
1996 2005 except KeyError:
1997 2006 pass
1998 2007
1999 2008 # Set mode tests insertion speed with cache eviction.
2000 2009 setseq = []
2001 2010 costs = []
2002 2011 for i in _xrange(sets):
2003 2012 setseq.append(random.randint(0, _maxint))
2004 2013 costs.append(random.choice(costrange))
2005 2014
2006 2015 def doinserts():
2007 2016 d = util.lrucachedict(size)
2008 2017 for v in setseq:
2009 2018 d.insert(v, v)
2010 2019
2011 2020 def doinsertscost():
2012 2021 d = util.lrucachedict(size, maxcost=costlimit)
2013 2022 for i, v in enumerate(setseq):
2014 2023 d.insert(v, v, cost=costs[i])
2015 2024
2016 2025 def dosets():
2017 2026 d = util.lrucachedict(size)
2018 2027 for v in setseq:
2019 2028 d[v] = v
2020 2029
2021 2030 # Mixed mode randomly performs gets and sets with eviction.
2022 2031 mixedops = []
2023 2032 for i in _xrange(mixed):
2024 2033 r = random.randint(0, 100)
2025 2034 if r < mixedgetfreq:
2026 2035 op = 0
2027 2036 else:
2028 2037 op = 1
2029 2038
2030 2039 mixedops.append((op,
2031 2040 random.randint(0, size * 2),
2032 2041 random.choice(costrange)))
2033 2042
2034 2043 def domixed():
2035 2044 d = util.lrucachedict(size)
2036 2045
2037 2046 for op, v, cost in mixedops:
2038 2047 if op == 0:
2039 2048 try:
2040 2049 d[v]
2041 2050 except KeyError:
2042 2051 pass
2043 2052 else:
2044 2053 d[v] = v
2045 2054
2046 2055 def domixedcost():
2047 2056 d = util.lrucachedict(size, maxcost=costlimit)
2048 2057
2049 2058 for op, v, cost in mixedops:
2050 2059 if op == 0:
2051 2060 try:
2052 2061 d[v]
2053 2062 except KeyError:
2054 2063 pass
2055 2064 else:
2056 2065 d.insert(v, v, cost=cost)
2057 2066
2058 2067 benches = [
2059 2068 (doinit, b'init'),
2060 2069 ]
2061 2070
2062 2071 if costlimit:
2063 2072 benches.extend([
2064 2073 (dogetscost, b'gets w/ cost limit'),
2065 2074 (doinsertscost, b'inserts w/ cost limit'),
2066 2075 (domixedcost, b'mixed w/ cost limit'),
2067 2076 ])
2068 2077 else:
2069 2078 benches.extend([
2070 2079 (dogets, b'gets'),
2071 2080 (doinserts, b'inserts'),
2072 2081 (dosets, b'sets'),
2073 2082 (domixed, b'mixed')
2074 2083 ])
2075 2084
2076 2085 for fn, title in benches:
2077 2086 timer, fm = gettimer(ui, opts)
2078 2087 timer(fn, title=title)
2079 2088 fm.end()
2080 2089
2081 2090 @command(b'perfwrite', formatteropts)
2082 2091 def perfwrite(ui, repo, **opts):
2083 2092 """microbenchmark ui.write
2084 2093 """
2085 2094 opts = _byteskwargs(opts)
2086 2095
2087 2096 timer, fm = gettimer(ui, opts)
2088 2097 def write():
2089 2098 for i in range(100000):
2090 2099 ui.write((b'Testing write performance\n'))
2091 2100 timer(write)
2092 2101 fm.end()
2093 2102
2094 2103 def uisetup(ui):
2095 2104 if (util.safehasattr(cmdutil, b'openrevlog') and
2096 2105 not util.safehasattr(commands, b'debugrevlogopts')):
2097 2106 # for "historical portability":
2098 2107 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2099 2108 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2100 2109 # openrevlog() should cause failure, because it has been
2101 2110 # available since 3.5 (or 49c583ca48c4).
2102 2111 def openrevlog(orig, repo, cmd, file_, opts):
2103 2112 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2104 2113 raise error.Abort(b"This version doesn't support --dir option",
2105 2114 hint=b"use 3.5 or later")
2106 2115 return orig(repo, cmd, file_, opts)
2107 2116 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)