##// END OF EJS Templates
perf: use an explicit function in perfbranchmapload...
Boris Feld -
r40736:30f443d3 default
parent child Browse files
Show More
@@ -1,2420 +1,2422
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance'''
3 3
4 4 # "historical portability" policy of perf.py:
5 5 #
6 6 # We have to do:
7 7 # - make perf.py "loadable" with as wide Mercurial version as possible
8 8 # This doesn't mean that perf commands work correctly with that Mercurial.
9 9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
10 10 # - make historical perf command work correctly with as wide Mercurial
11 11 # version as possible
12 12 #
13 13 # We have to do, if possible with reasonable cost:
14 14 # - make recent perf command for historical feature work correctly
15 15 # with early Mercurial
16 16 #
17 17 # We don't have to do:
18 18 # - make perf command for recent feature work correctly with early
19 19 # Mercurial
20 20
21 21 from __future__ import absolute_import
22 22 import contextlib
23 23 import functools
24 24 import gc
25 25 import os
26 26 import random
27 27 import shutil
28 28 import struct
29 29 import sys
30 30 import tempfile
31 31 import threading
32 32 import time
33 33 from mercurial import (
34 34 changegroup,
35 35 cmdutil,
36 36 commands,
37 37 copies,
38 38 error,
39 39 extensions,
40 40 mdiff,
41 41 merge,
42 42 revlog,
43 43 util,
44 44 )
45 45
46 46 # for "historical portability":
47 47 # try to import modules separately (in dict order), and ignore
48 48 # failure, because these aren't available with early Mercurial
49 49 try:
50 50 from mercurial import branchmap # since 2.5 (or bcee63733aad)
51 51 except ImportError:
52 52 pass
53 53 try:
54 54 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
55 55 except ImportError:
56 56 pass
57 57 try:
58 58 from mercurial import registrar # since 3.7 (or 37d50250b696)
59 59 dir(registrar) # forcibly load it
60 60 except ImportError:
61 61 registrar = None
62 62 try:
63 63 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
64 64 except ImportError:
65 65 pass
66 66 try:
67 67 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
68 68 except ImportError:
69 69 pass
70 70
def identity(a):
    """Return the argument unchanged (fallback for missing pycompat helpers)."""
    return a
73 73
74 74 try:
75 75 from mercurial import pycompat
76 76 getargspec = pycompat.getargspec # added to module after 4.5
77 77 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
78 78 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
79 79 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
80 80 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
81 81 if pycompat.ispy3:
82 82 _maxint = sys.maxsize # per py3 docs for replacing maxint
83 83 else:
84 84 _maxint = sys.maxint
85 85 except (ImportError, AttributeError):
86 86 import inspect
87 87 getargspec = inspect.getargspec
88 88 _byteskwargs = identity
89 89 fsencode = identity # no py3 support
90 90 _maxint = sys.maxint # no py3 support
91 91 _sysstr = lambda x: x # no py3 support
92 92 _xrange = xrange
93 93
94 94 try:
95 95 # 4.7+
96 96 queue = pycompat.queue.Queue
97 97 except (AttributeError, ImportError):
98 98 # <4.7.
99 99 try:
100 100 queue = pycompat.queue
101 101 except (AttributeError, ImportError):
102 102 queue = util.queue
103 103
104 104 try:
105 105 from mercurial import logcmdutil
106 106 makelogtemplater = logcmdutil.maketemplater
107 107 except (AttributeError, ImportError):
108 108 try:
109 109 makelogtemplater = cmdutil.makelogtemplater
110 110 except (AttributeError, ImportError):
111 111 makelogtemplater = None
112 112
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
# unique sentinel: distinguishes "attribute missing" from an attribute
# whose value happens to be falsy (None, 0, b'', ...)
_undefined = object()
def safehasattr(thing, attr):
    # 'attr' is a bytes literal throughout this file; _sysstr() converts
    # it to a native str so getattr() works on Python 3 as well
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)
120 120
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == 'nt':
    # os.name is always a native str; the previous comparison against
    # b'nt' could never match on Python 3, silently selecting the
    # low-resolution time.time on Windows
    util.timer = time.clock
else:
    util.timer = time.time
130 130
131 131 # for "historical portability":
132 132 # use locally defined empty option list, if formatteropts isn't
133 133 # available, because commands.formatteropts has been available since
134 134 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
135 135 # available since 2.2 (or ae5f92e154d3)
136 136 formatteropts = getattr(cmdutil, "formatteropts",
137 137 getattr(commands, "formatteropts", []))
138 138
139 139 # for "historical portability":
140 140 # use locally defined option list, if debugrevlogopts isn't available,
141 141 # because commands.debugrevlogopts has been available since 3.7 (or
142 142 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
143 143 # since 1.9 (or a79fea6b3e77).
144 144 revlogopts = getattr(cmdutil, "debugrevlogopts",
145 145 getattr(commands, "debugrevlogopts", [
146 146 (b'c', b'changelog', False, (b'open changelog')),
147 147 (b'm', b'manifest', False, (b'open manifest')),
148 148 (b'', b'dir', False, (b'open directory manifest')),
149 149 ]))
150 150
151 151 cmdtable = {}
152 152
153 153 # for "historical portability":
154 154 # define parsealiases locally, because cmdutil.parsealiases has been
155 155 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a command spec b"cmd|alias1|alias2" into the list of its names."""
    names = cmd.split(b"|")
    return names
158 158
# pick the most capable "@command" decorator this Mercurial provides,
# falling back through progressively older APIs
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            # emulate norepo by appending to the global commands.norepo list
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
186 186
187 187 try:
188 188 import mercurial.registrar
189 189 import mercurial.configitems
190 190 configtable = {}
191 191 configitem = mercurial.registrar.configitem(configtable)
192 192 configitem(b'perf', b'presleep',
193 193 default=mercurial.configitems.dynamicdefault,
194 194 )
195 195 configitem(b'perf', b'stub',
196 196 default=mercurial.configitems.dynamicdefault,
197 197 )
198 198 configitem(b'perf', b'parentscount',
199 199 default=mercurial.configitems.dynamicdefault,
200 200 )
201 201 configitem(b'perf', b'all-timing',
202 202 default=mercurial.configitems.dynamicdefault,
203 203 )
204 204 except (ImportError, AttributeError):
205 205 pass
206 206
def getlen(ui):
    """Return len(), or a constant-1 stub when perf.stub is configured."""
    if not ui.configbool(b"perf", b"stub", False):
        return len
    return lambda x: 1
211 211
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                # mimic plainformatter: falsy means "not a structured format"
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)
    return functools.partial(_timer, fm, displayall=displayall), fm
277 277
def stub_timer(fm, func, setup=None, title=None):
    """Run 'func' exactly once, without timing (perf.stub mode).

    Mirrors _timer's signature so gettimer() can substitute it; 'fm' and
    'title' are accepted but unused.
    """
    # run the per-iteration setup just as _timer does; previously it was
    # silently ignored, so stub-mode runs skipped required preparation
    if setup is not None:
        setup()
    func()
280 280
@contextlib.contextmanager
def timeone():
    """Measure one run of the managed block; yields a one-item result list.

    After the block exits, the yielded list contains a single
    (wall, user, sys) tuple of elapsed times.
    """
    sample = []
    times_before = os.times()
    wall_before = util.timer()
    yield sample
    wall_after = util.timer()
    times_after = os.times()
    sample.append((wall_after - wall_before,
                   times_after[0] - times_before[0],
                   times_after[1] - times_before[1]))
291 291
def _timer(fm, func, setup=None, title=None, displayall=False):
    """Repeatedly run 'func' (preceded by 'setup' when given) and report.

    The loop always runs at least 3 iterations; it stops after 3 seconds
    once 100 iterations are done, or after 10 seconds once 3 iterations
    are done.  The return value of the last run is passed to formatone()
    as the benchmark result.
    """
    gc.collect()  # reduce the chance of a GC pause skewing a sample
    results = []
    begin = util.timer()
    count = 0
    while True:
        if setup is not None:
            setup()
        with timeone() as item:
            r = func()
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # fast benchmarks: stop after 3s once we have >= 100 samples
        if cstop - begin > 3 and count >= 100:
            break
        # slow benchmarks: stop after 10s once we have >= 3 samples
        if cstop - begin > 10 and count >= 3:
            break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)
312 312
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Emit one benchmark's timings through formatter 'fm'.

    'timings' is a list of (wall, user, sys) tuples; it is sorted in
    place.  The best entry is always reported; with 'displayall' the
    max, average and median entries are reported as well.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def _show(role, entry):
        # the plain "best" line carries no prefix; other roles are prefixed
        prefix = b'' if role == b'best' else b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    _show(b'best', timings[0])
    if displayall:
        _show(b'max', timings[-1])
        _show(b'avg', tuple([sum(col) / count for col in zip(*timings)]))
        _show(b'median', timings[len(timings) // 2])
344 344
345 345 # utilities for historical portability
346 346
def getint(ui, section, name, default):
    """Read config value section.name as an int, or 'default' when unset.

    ui.configint has only been available since 1.9 (or fa2b596db182), so
    parse the raw value ourselves for older Mercurial.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, raw))
358 358
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    Aborts if 'obj' lacks the 'name' attribute at runtime, so a removed
    attribute cannot silently invalidate a performance measurement.

    Returns a small helper object with set()/restore() methods to
    (1) assign a new value and (2) put the original value back.

    With ignoremissing=True a missing attribute yields None instead of
    aborting, which is useful for attributes that only exist in some
    Mercurial versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    # convert once: 'name' arrives as bytes, getattr/setattr need str
    attrname = _sysstr(name)
    saved = getattr(obj, attrname)

    class _accessor(object):
        def set(self, newvalue):
            setattr(obj, attrname, newvalue)

        def restore(self):
            setattr(obj, attrname, saved)

    return _accessor()
388 388
389 389 # utilities to examine each internal API changes
390 390
def getbranchmapsubsettable():
    """Locate the 'subsettable' mapping across Mercurial versions.

    It has lived in branchmap since 2.9 (or 175c6fd8cacc) and before
    that in repoview since 2.5 (or 59a9f18d4587).
    """
    for candidate in (branchmap, repoview):
        table = getattr(candidate, 'subsettable', None)
        if table:
            return table

    # when bisecting within bcee63733aad::59a9f18d4587 both modules can
    # exist while neither carries the attribute
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
406 406
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store

    repo.svfs has only existed since 2.3 (or 7034365089bf); fall back to
    the historical 'sopener' name on older Mercurial.
    """
    svfs = getattr(repo, 'svfs', None)
    if not svfs:
        return getattr(repo, 'sopener')
    return svfs
417 417
def getvfs(repo):
    """Return appropriate object to access files under .hg

    repo.vfs has only existed since 2.3 (or 7034365089bf); fall back to
    the historical 'opener' name on older Mercurial.
    """
    vfs = getattr(repo, 'vfs', None)
    if not vfs:
        return getattr(repo, 'opener')
    return vfs
428 428
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            # NB: instance __dict__ keys are native str; the previous
            # bytes key b'_tagscache' never matched on Python 3, so the
            # cache was silently kept warm between runs
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
457 457
458 458 # utilities to clear cache
459 459
def clearfilecache(obj, attrname):
    """Invalidate the @filecache'd property 'attrname' on 'obj'."""
    # filecache entries live on the unfiltered repo when one exists
    if getattr(obj, 'unfiltered', None) is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
467 467
468 468 # perf commands
469 469
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the working directory (as status does)"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})
    def d():
        walk = repo.dirstate.walk(matcher, subrepos=[], unknown=True,
                                  ignored=False)
        return len(list(walk))
    timer(d)
    fm.end()
478 478
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file 'f' at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]
    def d():
        return len(fctx.annotate(True))
    timer(d)
    fm.end()
486 486
@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    """benchmark computing the working copy status"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        st = repo.status(unknown=opts[b'unknown'])
        return sum(map(len, st))
    timer(d)
    fm.end()
498 498
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark an addremove dry-run over the working copy"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # read the original value before entering the try block: if the read
    # itself raised inside the try, the finally clause would then fail
    # with a NameError on 'oldquiet'
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
512 512
def clearcaches(cl):
    """Clear changelog lookup caches, across internal API generations."""
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # older API: reset the node->rev lookup cache by hand
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
521 521
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark computing the changelog head revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    changelog = repo.changelog
    def d():
        len(changelog.headrevs())
        # reset lookup caches so each run starts cold
        clearcaches(changelog)
    timer(d)
    fm.end()
532 532
@command(b'perftags', formatteropts)
def perftags(ui, repo, **opts):
    """benchmark computing the repository tags from scratch"""
    import mercurial.changelog
    import mercurial.manifest

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    svfs = getsvfs(repo)
    cleartagscache = repocleartagscachefunc(repo)
    def _reset():
        # replace changelog/manifestlog wholesale so their caches are cold
        repo.changelog = mercurial.changelog.changelog(svfs)
        rootmanifest = mercurial.manifest.manifestrevlog(svfs)
        repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo,
                                                          rootmanifest)
        cleartagscache()
    def _count():
        return len(repo.tags())
    timer(_count, setup=_reset)
    fm.end()
552 552
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating over all ancestors of the changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    headrevs = repo.changelog.headrevs()
    def d():
        for _anc in repo.changelog.ancestors(headrevs):
            pass
    timer(d)
    fm.end()
563 563
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests of REVSET revs in a lazy ancestor set"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    headrevs = repo.changelog.headrevs()
    def d():
        ancs = repo.changelog.ancestors(headrevs)
        for rev in revs:
            rev in ancs
    timer(d)
    fm.end()
576 576
@command(b'perfbookmarks', formatteropts)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def _drop():
        # evict the cached bookmarks so each run re-reads from disk
        clearfilecache(repo, b'_bookmarks')
    def d():
        repo._bookmarks
    timer(d, setup=_drop)
    fm.end()
589 589
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # each make* helper returns a zero-argument callable suitable for
    # timer(); the bundle file is re-opened on every invocation so each
    # run pays the full I/O cost

    def makebench(fn):
        # benchmark 'fn' applied to a freshly opened bundle object
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # benchmark reading the whole bundle stream in 'size'-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # baseline: raw file reads without any bundle-layer decoding
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        # benchmark reading every bundle2 part in 'size'-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # open once just to detect the bundle flavor and pick the benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
707 707
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'version', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, version=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(version, repo)

    def d():
        state, chunks = bundler._generatechangelog(cl, nodes)
        # consume the generator so every chunk is actually produced
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
738 738
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark rebuilding the dirstate directory map via hasdir()"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b'a' in ds  # prime the dirstate so only hasdir() is measured
    def d():
        ds.hasdir(b'a')
        # drop the cached directory map so the next run rebuilds it
        del ds._map._dirs
    timer(d)
    fm.end()
750 750
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark reloading the dirstate from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # make sure the dirstate is loaded up front
    def d():
        repo.dirstate.invalidate()
        # the membership test forces a full re-read from disk
        b"a" in repo.dirstate
    timer(d)
    fm.end()
761 761
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark dirstate hasdir() with a cold directory cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # prime the dirstate
    def d():
        repo.dirstate.hasdir(b"a")
        # drop the cached directory map so each run rebuilds it
        del repo.dirstate._map._dirs
    timer(d)
    fm.end()
772 772
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark building the dirstate file fold map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b'a' in ds  # prime the dirstate
    def d():
        ds._map.filefoldmap.get(b'a')
        # drop the cached fold map so each run rebuilds it
        del ds._map.filefoldmap
    timer(d)
    fm.end()
784 784
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark building the dirstate directory fold map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b'a' in ds  # prime the dirstate
    def d():
        ds._map.dirfoldmap.get(b'a')
        # drop both caches so each run rebuilds dirs and their fold map
        del ds._map.dirfoldmap
        del ds._map._dirs
    timer(d)
    fm.end()
797 797
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark writing the dirstate back to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds  # prime the dirstate
    def d():
        ds._dirty = True  # force write() to actually serialize
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()
809 809
@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    """benchmark merge.calculateupdates() planning a merge of the working
    copy against REV (no changes are applied)"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()
828 828
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark computing path copies between two revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    oldctx = scmutil.revsingle(repo, rev1, rev1)
    newctx = scmutil.revsingle(repo, rev2, rev2)
    def d():
        copies.pathcopies(oldctx, newctx)
    timer(d)
    fm.end()
839 839
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
          ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cached = repo._phasecache
    fullmode = opts.get(b'full')
    def d():
        phasecache = cached
        if fullmode:
            # also pay the cost of re-reading phaseroots from disk
            clearfilecache(repo, b'_phasecache')
            phasecache = repo._phasecache
        phasecache.invalidate()
        phasecache.loadphaserevs(repo)
    timer(d)
    fm.end()
858 858
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server

    DEST defaults to the 'default-push' or 'default' path.  The phase
    listkeys namespace is fetched once up front; only the local summary
    computation is timed.
    """
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = (path.branch, opts.get(b'branch') or [])
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    # use items() rather than iteritems(): listkeys returns a plain
    # dict, which has no iteritems() on Python 3
    for nhex, phase in remotephases.items():
        if nhex == b'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
916 916
@command(b'perfmanifest',[
    (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
    (b'', b'clear-disk', False, b'clear on-disk caches too'),
] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset; benchmark its manifest
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # a full 40-digit hex manifest node was given directly
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    # older manifestlog API, before getstorage() existed
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
952 952
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changeset entry from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()
    def readchangeset():
        repo.changelog.read(node)
        #repo.changelog._cache = None
    timer(readchangeset)
    fm.end()
963 963
@command(b'perfindex', formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark parsing the changelog index and resolving the tip node"""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # disable the lazy index parser shipped with old Mercurial versions
    mercurial.revlog._prereadsize = 2**24
    tipnode = repo[b"tip"].node()
    svfs = getsvfs(repo)
    def loadindex():
        cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
        cl.rev(tipnode)
    timer(loadindex)
    fm.end()
977 977
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark spawning `hg version` with an empty configuration"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def spawnhg():
        # re-run the current hg executable with HGRCPATH neutralized,
        # discarding its output
        if os.name == r'nt':
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
        else:
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
    timer(spawnhg)
    fm.end()
991 991
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark resolving the parents of many changelog nodes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodes = [repo.changelog.node(i) for i in _xrange(count)]
    def lookupparents():
        for node in nodes:
            repo.changelog.parents(node)
    timer(lookupparents)
    fm.end()
1008 1008
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the files touched by a changeset"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    def listfiles():
        len(repo[rev].files())
    timer(listfiles)
    fm.end()
1018 1018
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw file list straight from the changelog"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def rawfiles():
        # index 3 of the parsed changelog entry is the file list
        len(cl.read(rev)[3])
    timer(rawfiles)
    fm.end()
1029 1029
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def lookup():
        return len(repo.lookup(rev))
    timer(lookup)
    fm.end()
1036 1036
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
         ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark replaying a sequence of random edits on a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: every invocation replays the exact same edit sequence
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        # pick a source hunk [a1, a2) inside the current file and a
        # replacement hunk [b1, b2); track the resulting line count
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        # apply the pre-computed edits to a fresh linelog
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1070 1070
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def resolve():
        return len(scmutil.revrange(repo, specs))
    timer(resolve)
    fm.end()
1078 1078
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark mapping a node back to its revision number"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    # disable the lazy index parser shipped with old Mercurial versions
    mercurial.revlog._prereadsize = 2**24
    node = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def lookup():
        cl.rev(node)
        # wipe caches so every run pays the full lookup cost
        clearcaches(cl)
    timer(lookup)
    fm.end()
1092 1092
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running `hg log` (output is captured and discarded)"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()
    def runlog():
        commands.log(ui, repo, rev=rev, date=b'', user=b'',
                     copies=opts.get(b'rename'))
    timer(runlog)
    ui.popbuffer()
    fm.end()
1106 1106
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        for rev in repo.changelog.revs(start=len(repo) - 1, stop=-1):
            # reading the branch forces the changelog entry itself to be
            # loaded, not just the index
            repo[rev].branch()
    timer(moonwalk)
    fm.end()
1121 1121
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    # makelogtemplater is resolved at import time; it is None on
    # Mercurial versions that predate the API
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render into a ui whose output goes to /dev/null so we time the
    # templating work, not the terminal
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1155 1155
@command(b'perfhelper-tracecopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
         ])
def perfhelpertracecopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.
    """
    # NOTE(review): mutable default `revs=[]` is only read, never mutated,
    # so it is harmless here, but `revs=None` would be the safer idiom
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    header = '%12s %12s %12s %12s\n'
    output = ("%(source)12s %(destination)12s "
              "%(nbrevs)12d %(nbmissingfiles)12d\n")
    fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # revisions of interest: merges, since they exercise copy tracing
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            # report one line per (ancestor base, merge parent) pair that
            # has files missing in the base
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                fm.startitem()
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)
    fm.end()
1203 1203
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def makeauditor():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)
    timer(makeauditor)
    fm.end()
1210 1210
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    def load():
        store.fncache._load()
    timer(load)
    fm.end()
1220 1220
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache to disk

    The fncache is marked dirty and rewritten inside a transaction (with
    the original file backed up) on every timing run.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    # use try/finally so a failing timing run cannot leave the
    # repository permanently locked (the original leaked the lock on error)
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        tr.addbackup(b'fncache')
        def d():
            s.fncache._dirty = True
            s.fncache.write(tr)
        timer(d)
        tr.close()
    finally:
        lock.release()
    fm.end()
1237 1237
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()
    def encodeall():
        for path in store.fncache.entries:
            store.encode(path)
    timer(encodeall)
    fm.end()
1249 1249
def _bdiffworker(q, blocks, xdiff, ready, done):
    """worker loop for the threaded mode of perfbdiff

    Pulls text pairs from ``q`` and diffs each one with the requested
    algorithm.  A ``None`` item marks the end of a timing run: the worker
    then blocks on ``ready`` until the driver either starts another run or
    sets ``done`` to shut everything down.
    """
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()
1265 1265
def _manifestrevision(repo, mnode):
    """return the stored text of manifest node ``mnode``

    Handles both the modern ``getstorage()`` manifestlog API and the
    legacy ``_revlog`` attribute of older Mercurial versions.
    """
    ml = repo.manifestlog
    if util.safehasattr(ml, b'getstorage'):
        return ml.getstorage(b'').revision(mnode)
    return ml._revlog.revision(mnode)
1275 1275
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # positional argument is the revision, not a file
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # collect all (old text, new text) pairs up front so only the diffing
    # itself is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        # threaded mode: start the workers now so thread startup cost is
        # not part of the measurement; None items are end-of-run sentinels
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            # feed all pairs plus one sentinel per worker, wake everyone,
            # then wait for the queue to drain
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # shut the workers down: set the done flag, unblock each worker
        # with a sentinel and wake them from their ready.wait()
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1376 1376
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # positional argument is the revision, not a file
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # collect all (old text, new text) pairs up front so only the diffing
    # itself is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1442 1442
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # short flag -> diff option name, mirroring hg diff's -w/-b/-B
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        # note: `opts` is rebound each iteration and `d` closes over it;
        # this is safe because timer(d) runs before the next rebinding
        opts = dict((options[c], b'1') for c in diffopt)
        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()
        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()
1464 1464
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # first 4 bytes: flags (high 16 bits) + revlog format version (low 16)
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    # sample nodes at fixed fractions of the revlog for lookup benchmarks
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    # (callable, title) pairs; each is timed independently below
    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1582 1582
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        # clear caches so each run reads from storage, not memory
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in _xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1624 1624
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
         ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase)
        allresults.append(timing)

    ### consolidate the results in a single list
    # turn the per-run [(rev, timing)] lists into one [(rev, [timings])] list
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display
    # (the 50%% entry previously used a 70//100 index by mistake, which
    # printed the 70th percentile under the "50%" label)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
1726 1726
1727 1727 class _faketr(object):
1728 1728 def add(s, x, y, z=None):
1729 1729 return None
1730 1730
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True):
    """re-add revisions [startrev, stoprev] of ``orig`` to a truncated
    temporary copy, timing each ``addrawrevision`` call

    ``source`` selects how the revision data is fed (see perfrevlogwrite).
    Returns a list of (rev, timing) pairs for one full pass.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        for idx, rev in enumerate(revs):
            ui.progress(topic, idx, unit='revs', total=total)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            # only the addrawrevision call itself is timed, not the
            # preparation of its arguments
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        ui.progress(topic, total, unit='revs', total=total)
        ui.progress(topic, None, unit='revs', total=total)
    return timings
1751 1751
def _getrevisionseed(orig, rev, tr, source):
    """build the (args, kwargs) passed to ``addrawrevision`` for ``rev``

    Depending on ``source``, the content is provided either as a full text
    or as a cached delta against a chosen base revision.
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        parent = p2
        if p2 == nullid:
            # no second parent: fall back to the first one
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # compare both parent deltas and keep the smaller one
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        # reuse the delta exactly as stored in the original revlog
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1790 1790
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """yield a writable copy of ``orig`` truncated to ``truncaterev``
    revisions, backed by files in a temporary directory

    The temporary directory is always removed on exit.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # each index entry is orig._io.size bytes
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
1837 1837
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # No explicit engines requested: benchmark every available engine
        # that implements a revlog compressor (probe with a dummy compress).
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # Open a raw file handle on the file actually holding chunk data
        # (the index file for inline revlogs, the data file otherwise).
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        # one segment read per revision, new file handle each time
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # same as doread but reusing a single file handle
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # a single segment read covering the whole revision range
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        # read + decompress, one revision at a time
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        # Recompress the chunks captured by dochunkbatch with the given
        # engine, restoring the revlog's original compressor afterwards.
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1955 1955
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # -c/-m take no FILE argument; the positional slot holds REV
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # Carve each delta-chain slice's raw segment (one entry of *data*)
        # into per-revision chunks, as zero-copy buffers.
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index records with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # for "historical portability": slicechunk moved into
    # revlogutils.deltas; older versions had revlog._slicechunk.
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # Precompute every intermediate artifact once, so each benchmark
    # below measures only its own phase.
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2091 2091
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building
    the volatile revision set caches on the revset execution. Volatile
    caches hold filtered and obsolescence related data."""
    # NOTE: the docstring previously referenced a nonexistent --clean
    # option; the declared flag is --clear.
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def d():
        if clear:
            # drop filtered/obsolescence caches so each run rebuilds them
            repo.invalidatevolatilesets()
        if contexts:
            # additionally pay the cost of a changectx per revision
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass
    timer(d)
    fm.end()
2114 2114
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def makebench(compute, target):
        """Return a callable that recomputes *target* from a cold state."""
        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            compute(repo, target)
        return run

    # obsolescence-related sets first, then repoview filters, each in
    # sorted order and restricted to *names* when any were given
    for setname in sorted(obsolete.cachefuncs):
        if names and setname not in names:
            continue
        timer(makebench(obsolete.getrevs, setname), title=setname)

    for filtername in sorted(repoview.filtertable):
        if names and filtername not in names:
            continue
        timer(makebench(repoview.filterrevs, filtername), title=filtername)
    fm.end()
2156 2156
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
         ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap: measures a build from scratch
                view._branchcaches.clear()
            else:
                # drop only this filter's cache: measures an update from
                # the nearest cached subset
                view._branchcaches.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick any filter whose subset is not still pending, so that
        # allfilters ends up ordered from smaller to bigger subsets;
        # the for/else fires only if every filter waits on another
        # (a cycle), which would otherwise loop forever
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap reads and writes for the duration of the
    # benchmark so only in-memory computation is measured
    branchcacheread = safeattrsetter(branchmap, b'read')
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcacheread.set(lambda repo: None)
    branchcachewrite.set(lambda bc, repo: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2225 2225
@command(b'perfbranchmapload', [
     (b'f', b'filter', b'', b'Specify repoview filter'),
     (b'', b'list', False, b'List branchmap filter caches'),
    ] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap

    With --list, only enumerate the on-disk branchmap cache files and
    their sizes instead of benchmarking.
    """
    opts = _byteskwargs(opts)

    if list:
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if filter:
        repo = repoview.repoview(repo, filter)
    else:
        repo = repo.unfiltered()
    # try once without timer, the filter may not be cached
    if branchmap.read(repo) is None:
        raise error.Abort(b'No branchmap cached for %s repo'
                          % (filter or b'unfiltered'))
    timer, fm = gettimer(ui, opts)
    # use an explicit function rather than a lambda so profiles show a
    # named frame (the diff residue kept both the old lambda call and the
    # new function here; only the explicit function must remain)
    def bench():
        branchmap.read(repo)
    timer(bench)
    fm.end()
2252 2254
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)

    def loadmarkers():
        # constructing the obsstore parses the on-disk markers; len()
        # is what gets reported as the benchmark result
        return len(obsolete.obsstore(svfs))

    timer(loadmarkers)
    fm.end()
2262 2264
@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
     (b'', b'mincost', 0, b'smallest cost of items in cache'),
     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
     (b'', b'size', 4, b'size of cache'),
     (b'', b'gets', 10000, b'number of key lookups'),
     (b'', b'sets', 10000, b'number of key sets'),
     (b'', b'mixed', 10000, b'number of mixed mode operations'),
     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    """Benchmark ``util.lrucachedict`` under several access patterns.

    Measures construction, pure gets, inserts/sets (with eviction), and a
    randomized mix of gets and sets.  When --costlimit is non-zero, the
    cost-aware variants of the benchmarks run instead of the plain ones.
    """
    opts = _byteskwargs(opts)

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        # NOTE: closes over ``costs``, which is assigned below; this is
        # fine because the closure only runs after ``costs`` exists.
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # cost-aware and plain benchmarks are mutually exclusive: the cache
    # behaves differently once a cost limit is in force
    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2393 2395
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def bench():
        # 100k small writes exercise the ui output path
        for _ in range(100000):
            ui.write(b'Testing write performance\n')

    timer(bench)
    fm.end()
2406 2408
def uisetup(ui):
    """Extension setup hook: install compatibility shims when needed.

    Mercurial's extension loader calls uisetup() once at startup; here it
    is used only to patch openrevlog() on old Mercurial versions.
    """
    if (util.safehasattr(cmdutil, b'openrevlog') and
        not util.safehasattr(commands, b'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(b"This version doesn't support --dir option",
                                  hint=b"use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
# (code-review page footer residue, not part of the source; kept as a
# comment so the file remains valid Python)
# General Comments 0
# You need to be logged in to leave comments. Login now