##// END OF EJS Templates
perf: move some of the perftags benchmark to the setup function...
Boris Feld -
r40718:4369c00a default
parent child Browse files
Show More
@@ -1,2369 +1,2370 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance'''
3 3
4 4 # "historical portability" policy of perf.py:
5 5 #
6 6 # We have to do:
7 7 # - make perf.py "loadable" with as wide Mercurial version as possible
8 8 # This doesn't mean that perf commands work correctly with that Mercurial.
9 9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
10 10 # - make historical perf command work correctly with as wide Mercurial
11 11 # version as possible
12 12 #
13 13 # We have to do, if possible with reasonable cost:
14 14 # - make recent perf command for historical feature work correctly
15 15 # with early Mercurial
16 16 #
17 17 # We don't have to do:
18 18 # - make perf command for recent feature work correctly with early
19 19 # Mercurial
20 20
21 21 from __future__ import absolute_import
22 22 import contextlib
23 23 import functools
24 24 import gc
25 25 import os
26 26 import random
27 27 import shutil
28 28 import struct
29 29 import sys
30 30 import tempfile
31 31 import threading
32 32 import time
33 33 from mercurial import (
34 34 changegroup,
35 35 cmdutil,
36 36 commands,
37 37 copies,
38 38 error,
39 39 extensions,
40 40 mdiff,
41 41 merge,
42 42 revlog,
43 43 util,
44 44 )
45 45
46 46 # for "historical portability":
47 47 # try to import modules separately (in dict order), and ignore
48 48 # failure, because these aren't available with early Mercurial
49 49 try:
50 50 from mercurial import branchmap # since 2.5 (or bcee63733aad)
51 51 except ImportError:
52 52 pass
53 53 try:
54 54 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
55 55 except ImportError:
56 56 pass
57 57 try:
58 58 from mercurial import registrar # since 3.7 (or 37d50250b696)
59 59 dir(registrar) # forcibly load it
60 60 except ImportError:
61 61 registrar = None
62 62 try:
63 63 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
64 64 except ImportError:
65 65 pass
66 66 try:
67 67 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
68 68 except ImportError:
69 69 pass
70 70
def identity(a):
    """Return *a* unchanged (no-op fallback for missing pycompat helpers)."""
    return a
73 73
74 74 try:
75 75 from mercurial import pycompat
76 76 getargspec = pycompat.getargspec # added to module after 4.5
77 77 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
78 78 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
79 79 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
80 80 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
81 81 if pycompat.ispy3:
82 82 _maxint = sys.maxsize # per py3 docs for replacing maxint
83 83 else:
84 84 _maxint = sys.maxint
85 85 except (ImportError, AttributeError):
86 86 import inspect
87 87 getargspec = inspect.getargspec
88 88 _byteskwargs = identity
89 89 fsencode = identity # no py3 support
90 90 _maxint = sys.maxint # no py3 support
91 91 _sysstr = lambda x: x # no py3 support
92 92 _xrange = xrange
93 93
94 94 try:
95 95 # 4.7+
96 96 queue = pycompat.queue.Queue
97 97 except (AttributeError, ImportError):
98 98 # <4.7.
99 99 try:
100 100 queue = pycompat.queue
101 101 except (AttributeError, ImportError):
102 102 queue = util.queue
103 103
104 104 try:
105 105 from mercurial import logcmdutil
106 106 makelogtemplater = logcmdutil.maketemplater
107 107 except (AttributeError, ImportError):
108 108 try:
109 109 makelogtemplater = cmdutil.makelogtemplater
110 110 except (AttributeError, ImportError):
111 111 makelogtemplater = None
112 112
113 113 # for "historical portability":
114 114 # define util.safehasattr forcibly, because util.safehasattr has been
115 115 # available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # unique sentinel; cannot collide with a real attribute value
def safehasattr(thing, attr):
    # attr may be a bytes literal; _sysstr() maps it to a native str
    # attribute name as required by getattr() on Python 3
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)
120 120
121 121 # for "historical portability":
122 122 # define util.timer forcibly, because util.timer has been available
123 123 # since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == 'nt':
    # os.name is a native str, so it must be compared against 'nt', not
    # b'nt' -- the bytes comparison could never match on Python 3 (it is
    # latent there because perf_counter always exists on py3, but wrong)
    util.timer = time.clock
else:
    util.timer = time.time
130 130
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
# (attribute names below are deliberately native str: getattr() needs str)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
                     (b'c', b'changelog', False, (b'open changelog')),
                     (b'm', b'manifest', False, (b'open manifest')),
                     (b'', b'dir', False, (b'open directory manifest')),
                     ]))
150 150
151 151 cmdtable = {}
152 152
153 153 # for "historical portability":
154 154 # define parsealiases locally, because cmdutil.parsealiases has been
155 155 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Return the list of aliases declared in a b"name|alias|..." spec."""
    aliases = cmd.split(b"|")
    return aliases
158 158
# pick the richest @command decorator this Mercurial provides:
# registrar.command (3.7+), cmdutil.command (1.9+, wrapped below to
# emulate "norepo" when missing), or a minimal local fallback
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
186 186
# register the perf.* config items when the registrar API exists; on
# older Mercurial the ImportError/AttributeError is deliberately swallowed
# and the unregistered items still work (with devel warnings at most)
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    pass
206 206
def getlen(ui):
    """Return the length function benchmarks should use.

    With the experimental perf.stub config set, a constant-1 stand-in is
    returned instead of the real len().
    """
    if not ui.configbool(b"perf", b"stub", False):
        return len
    return lambda x: 1
211 211
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        # copy the ui so the redirection does not leak into the caller's ui
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                # falsy, like a real plainformatter
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)
    return functools.partial(_timer, fm, displayall=displayall), fm
277 277
def stub_timer(fm, func, setup=None, title=None):
    """One-shot replacement for _timer() used when perf.stub is set.

    Runs the benchmarked function exactly once, without timing or
    reporting.  The setup callable must still run first: benchmarks that
    pass setup= (e.g. perftags, perfbookmarks) rely on it to put the repo
    into the expected state before func() executes.
    """
    if setup is not None:
        setup()
    func()
280 280
@contextlib.contextmanager
def timeone():
    """Time one run; after the with-block exits, r[0] holds the result.

    The yielded list receives a single (wallclock, user-cpu, system-cpu)
    triple once the managed block finishes.
    """
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    # os.times() indices: 0 is user time, 1 is system time
    r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
291 291
def _timer(fm, func, setup=None, title=None, displayall=False):
    """Repeatedly run (setup +) func, then report timings through fm.

    setup() runs before each iteration and is excluded from the
    per-iteration measurement (but counts against the wall-clock budget).
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    while True:
        if setup is not None:
            setup()
        with timeone() as item:
            r = func()
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # stop after >3s once 100 runs are in, or after >10s with just 3
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)
312 312
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's results through formatter *fm*.

    *timings* is a list of (wall, user, sys) triples.  The best (fastest)
    entry is always reported; with *displayall*, max, average and median
    are reported as well.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        prefix = b'' if role == b'best' else b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    display(b'best', timings[0])
    if displayall:
        display(b'max', timings[-1])
        display(b'avg', tuple([sum(col) / count for col in zip(*timings)]))
        display(b'median', timings[len(timings) // 2])
344 344
345 345 # utilities for historical portability
346 346
def getint(ui, section, name, default):
    """Read an integer config value, falling back to *default* when unset."""
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, raw))
358 358
def safeattrsetter(obj, name, ignoremissing=False):
    """Return a set()/restore() helper for attribute *name* of *obj*.

    Aborts when the attribute is missing, so that a future upstream
    rename/removal of the attribute cannot silently invalidate a
    benchmark's assumptions.  With *ignoremissing*, None is returned
    instead of aborting, which lets callers probe for attributes that
    only some Mercurial versions provide.

    The returned object can (1) assign a new value to the attribute and
    (2) restore the value the attribute had when this was called.
    """
    if not util.safehasattr(obj, name):
        if not ignoremissing:
            raise error.Abort((b"missing attribute %s of %s might break assumption"
                               b" of performance measurement") % (name, obj))
        return None

    sysname = _sysstr(name)
    origvalue = getattr(obj, sysname)

    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, sysname, newvalue)

        def restore(self):
            setattr(obj, sysname, origvalue)

    return attrutil()
388 388
389 389 # utilities to examine each internal API changes
390 390
def getbranchmapsubsettable():
    """Return the branchmap 'subsettable' mapping, wherever it lives."""
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    for mod in (branchmap, repoview):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
406 406
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf);
    # fall back to the pre-2.3 'sopener' name when it is absent
    store = getattr(repo, 'svfs', None)
    return store or getattr(repo, 'sopener')
417 417
def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf);
    # fall back to the pre-2.3 'opener' name when it is absent
    accessor = getattr(repo, 'vfs', None)
    return accessor or getattr(repo, 'opener')
428 428
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, '_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            #
            # repo.__dict__ keys are native str on Python 3; testing for
            # a b'_tagscache' key (as this function previously did) never
            # matched there, so the cache was never actually dropped
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    # in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
457 457
458 458 # utilities to clear cache
459 459
def clearfilecache(repo, attrname):
    """Drop a @filecache'd property so the next access recomputes it.

    attrname may be a bytes literal (callers pass b'...'); attribute
    names -- and, as far as this file can tell, _filecache keys -- are
    native str on Python 3, so normalize first: delattr() with a bytes
    name raises TypeError there, and a bytes vars() lookup never matches.
    """
    if not isinstance(attrname, str):
        attrname = attrname.decode('ascii')
    unfi = repo.unfiltered()
    if attrname in vars(unfi):
        delattr(unfi, attrname)
    unfi._filecache.pop(attrname, None)
465 465
466 466 # perf commands
467 467
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark a full dirstate walk over the files matching PATS"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()
476 476
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file F at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()
484 484
@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    """benchmark computing the working directory status"""
    opts = _byteskwargs(opts)
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
496 496
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the whole working copy"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # capture the original quiet level *before* entering the try block:
    # if the assignment lived inside and raised, the finally clause would
    # fail with UnboundLocalError instead of reporting the real error
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
510 510
def clearcaches(cl):
    """Drop a changelog/revlog's lookup caches for cold-cache timing."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # pre-clearcaches() revlogs: reset the node->rev cache by hand
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
519 519
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark computing changelog head revisions with cold caches"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        len(cl.headrevs())
        # clear inside the timed run so every iteration pays full cost
        clearcaches(cl)
    timer(d)
    fm.end()
530 530
@command(b'perftags', formatteropts)
def perftags(ui, repo, **opts):
    """benchmark computing repository tags"""
    import mercurial.changelog
    import mercurial.manifest

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    svfs = getsvfs(repo)
    repocleartagscache = repocleartagscachefunc(repo)
    def s():
        # setup (untimed): reload the changelog and manifest from disk
        # and drop the tags cache, so each timed run starts cold
        repo.changelog = mercurial.changelog.changelog(svfs)
        rootmanifest = mercurial.manifest.manifestrevlog(svfs)
        repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo,
                                                          rootmanifest)
        repocleartagscache()
    def t():
        return len(repo.tags())
    timer(t, setup=s)
    fm.end()
549 550
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark walking the ancestors of all repository heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        for a in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()
560 561
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests of REVSET revs in a lazy ancestor set"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        # membership test only; the result is deliberately discarded
        for rev in revs:
            rev in s
    timer(d)
    fm.end()
573 574
@command(b'perfbookmarks', formatteropts)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def s():
        # setup (untimed): drop the cached _bookmarks property
        clearfilecache(repo, b'_bookmarks')
    def d():
        repo._bookmarks
    timer(d, setup=s)
    fm.end()
586 587
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # NB: open() mode must be a native str -- on Python 3 a b'rb' mode
    # raises, so 'rb' (identical on Python 2) is used throughout

    def makebench(fn):
        def run():
            with open(bundlepath, 'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, 'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        def run():
            with open(bundlepath, 'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, 'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    with open(bundlepath, 'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
704 705
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'version', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, version=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    # resolve the requested revs (default: all) to changelog nodes up front
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(version, repo)

    def d():
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
735 736
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark dirstate.hasdir() with a cold directory cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate before timing (result deliberately discarded)
    b'a' in dirstate
    def d():
        dirstate.hasdir(b'a')
        # drop the cached directory map so the next run rebuilds it
        del dirstate._map._dirs
    timer(d)
    fm.end()
747 748
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark a dirstate containment check after invalidation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate before timing (result deliberately discarded)
    b"a" in repo.dirstate
    def d():
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(d)
    fm.end()
758 759
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark dirstate.hasdir() after dropping the directory cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate before timing (result deliberately discarded)
    b"a" in repo.dirstate
    def d():
        repo.dirstate.hasdir(b"a")
        # drop the cached directory map so the next run rebuilds it
        del repo.dirstate._map._dirs
    timer(d)
    fm.end()
769 770
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark case-folding lookups in the dirstate filefoldmap"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate before timing (result deliberately discarded)
    b'a' in dirstate
    def d():
        dirstate._map.filefoldmap.get(b'a')
        # drop the cached fold map so the next run rebuilds it
        del dirstate._map.filefoldmap
    timer(d)
    fm.end()
781 782
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark case-folding lookups in the dirstate dirfoldmap"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate before timing (result deliberately discarded)
    b'a' in dirstate
    def d():
        dirstate._map.dirfoldmap.get(b'a')
        # drop both caches so the next run rebuilds them
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(d)
    fm.end()
794 795
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark writing the dirstate out to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # prime the dirstate before timing (result deliberately discarded)
    b"a" in ds
    def d():
        # mark dirty so write() actually flushes -- presumably it would
        # otherwise skip the work; confirm against dirstate internals
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()
806 807
@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    """benchmark merge.calculateupdates() between the working copy and REV"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()
825 826
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark copies.pathcopies() between two revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def d():
        copies.pathcopies(ctx1, ctx2)
    timer(d)
    fm.end()
836 837
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
         ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def d():
        phases = _phases
        if full:
            # with --full, also pay the cost of re-reading the phase
            # data from disk by dropping the cached _phasecache
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()
855 856
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = (path.branch, opts.get(b'branch') or [])
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    # items() rather than iteritems(): dictionaries have no iteritems()
    # on Python 3, and items() behaves the same on both major versions
    for nhex, phase in remotephases.items():
        if nhex == b'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
913 914
@command(b'perfmanifest',[
    (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
    (b'', b'clear-disk', False, b'clear on-disk caches too'),
] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        # a 40-hex-digit argument is taken to be a full manifest node;
        # anything else must parse as an integer manifest revision
        if len(rev) == 40:
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
949 950
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changeset entry from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()
    def readcset():
        repo.changelog.read(node)
        #repo.changelog._cache = None
    timer(readcset)
    fm.end()
960 961
@command(b'perfindex', formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark constructing a changelog and resolving the tip node"""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # disable the lazy index parser of old Mercurial versions
    mercurial.revlog._prereadsize = 2**24
    tipnode = repo[b"tip"].node()
    svfs = getsvfs(repo)
    def loadindex():
        cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
        cl.rev(tipnode)
    timer(loadindex)
    fm.end()
974 975
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup time of a bare `hg version` invocation

    Spawns the currently running executable (sys.argv[0]) with an empty
    HGRCPATH so config file loading does not skew the measurement.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        if os.name != r'nt':
            # the inline HGRCPATH= assignment only affects the child
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
        else:
            # no inline env-var syntax on Windows; NOTE(review): this
            # mutates os.environ for the rest of this process too
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
    timer(d)
    fm.end()
988 989
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark resolving the parents of many changelog nodes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodes = [repo.changelog.node(i) for i in _xrange(count)]
    def resolveparents():
        for node in nodes:
            repo.changelog.parents(node)
    timer(resolveparents)
    fm.end()
1005 1006
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the files touched by a changeset"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    def listfiles():
        len(repo[rev].files())
    timer(listfiles)
    fm.end()
1015 1016
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw file list straight from the changelog"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def readfiles():
        # field [3] of a parsed changelog entry is the list of files
        len(cl.read(rev)[3])
    timer(readfiles)
    fm.end()
1026 1027
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def lookup():
        return len(repo.lookup(rev))
    timer(lookup)
    fm.end()
1033 1034
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
          ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark replaying a series of random edits into a linelog

    The replacelines() arguments are generated up front from a fixed
    random seed, so only building the linelog and applying the edits is
    timed, and every run replays the same edit sequence.
    """
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: the benchmark data is deterministic across runs
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        # pick a source hunk [a1, a2) within the current line count and a
        # replacement hunk [b1, b2); track the resulting line count
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1067 1068
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    def resolve():
        return len(revrange(repo, specs))
    timer(resolve)
    fm.end()
1075 1076
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark node-to-rev lookup against an uncached changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    # disable the lazy index parser of old Mercurial versions
    mercurial.revlog._prereadsize = 2**24
    node = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def lookup():
        cl.rev(node)
        # drop caches so every iteration performs a real lookup
        clearcaches(cl)
    timer(lookup)
    fm.end()
1089 1090
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
          ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running `hg log`, optionally following renames"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # capture the (discarded) log output so it is not printed
    ui.pushbuffer()
    def runlog():
        commands.log(ui, repo, rev=rev, date=b'', user=b'',
                     copies=opts.get(b'rename'))
    timer(runlog)
    ui.popbuffer()
    fm.end()
1103 1104
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def walkback():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            # branch() forces reading the changelog data, not just the index
            repo[rev].branch()
    timer(walkback)
    fm.end()
1118 1119
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
          ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    nullui = ui.copy()
    # render into the bit bucket so output cost does not dominate the timing
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    try:
        revs = opts.get(b'rev')
        if not revs:
            revs = [b'all()']
        revs = list(scmutil.revrange(repo, revs))

        defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                           b' {author|person}: {desc|firstline}\n')
        if testedtemplate is None:
            testedtemplate = defaulttemplate
        displayer = makelogtemplater(nullui, repo, testedtemplate)
        def format():
            for r in revs:
                ctx = repo[r]
                displayer.show(ctx)
                displayer.flush(ctx)

        timer, fm = gettimer(ui, opts)
        timer(format)
        fm.end()
    finally:
        # previously this handle leaked; always release the devnull file
        nullui.fout.close()
1152 1153
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def makeauditor():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)
    timer(makeauditor)
    fm.end()
1159 1160
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    def load():
        store.fncache._load()
    timer(load)
    fm.end()
1169 1170
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark rewriting the fncache file

    The write happens inside a transaction with the fncache backed up, so
    the repository is left intact.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        tr.addbackup(b'fncache')
        def d():
            # force a real write on every iteration
            s.fncache._dirty = True
            s.fncache.write(tr)
        timer(d)
        tr.close()
    finally:
        # previously the repo lock leaked if loading or timing raised
        lock.release()
    fm.end()
1186 1187
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark path encoding of every fncache entry"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()
    def encodeall():
        for path in store.fncache.entries:
            store.encode(path)
    timer(encodeall)
    fm.end()
1198 1199
def _bdiffworker(q, blocks, xdiff, ready, done):
    """thread worker for perfbdiff: diff text pairs pulled from queue ``q``

    A ``None`` queue item marks the end of one timing batch; the worker
    then blocks on the ``ready`` condition until the main thread either
    starts the next round or sets ``done`` to request shutdown.
    """
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            # same three diff flavors as the unthreaded path in perfbdiff
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()
1214 1215
def _manifestrevision(repo, mnode):
    """return the raw stored text of manifest node ``mnode``"""
    ml = repo.manifestlog

    # attribute name must be a native str: getattr() with a bytes name
    # raises TypeError on Python 3
    if util.safehasattr(ml, 'getstorage'):
        store = ml.getstorage(b'')
    else:
        # fall back for Mercurial versions without getstorage()
        store = ml._revlog

    return store.revision(mnode)
1224 1225
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        # --alldata implies walking the changelog
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the positional argument is really the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    # (old, new) fulltext pairs, gathered up front so only diffing is timed
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        # prime the queue and start the workers before timing so thread
        # startup cost is excluded from the measurement
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            # one None per worker ends this timing round (see _bdiffworker)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # ask the workers to shut down and wake them up one last time
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1325 1326
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        # --alldata implies walking the changelog
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the positional argument is really the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    # (old, new) fulltext pairs, gathered up front so only diffing is timed
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1391 1392
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # map of single-letter flag to the commands.diff() keyword it enables
    flagnames = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    for flags in ('', 'w', 'b', 'B', 'wB'):
        diffkwargs = dict((flagnames[c], b'1') for c in flags)
        def rundiff():
            ui.pushbuffer()
            commands.diff(ui, repo, **diffkwargs)
            ui.popbuffer()
        flagbytes = flags.encode('ascii')
        title = b'diffopts: %s' % (flagbytes and (b'-' + flagbytes) or b'none')
        timer(rundiff, title=title)
    fm.end()
1413 1414
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener') # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # the first four bytes of the index hold the version/flags header
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    # sample nodes at fixed points of the revlog for the lookup benches
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    # each entry is (callable, benchmark title); each is timed separately
    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1531 1532
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    if startrev < 0:
        # negative startrev counts back from the end of the revlog
        startrev = rllen + startrev

    def readseries():
        rl.clearcaches()

        first = startrev
        last = rllen
        step = opts[b'dist']

        if reverse:
            # walk from tip-1 down past startrev with a negative step
            first, last = last - 1, first - 1
            step = -1 * step

        for x in _xrange(first, last, step):
            # Old revisions don't support passing int.
            node = rl.node(x)
            rl.revision(node)

    timer, fm = gettimer(ui, opts)
    timer(readseries)
    fm.end()
1573 1574
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          (b'', b'count', 3, b'number of timed runs to perform'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
         ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative boundaries count back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase)
        allresults.append(timing)

    ### consolidate the results in a single list
    # results: [(rev, [timing-of-run-1, timing-of-run-2, ...]), ...]
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # fixed: this entry was previously computed with ``* 70``, so the
        # value labeled "50%" was actually the 70th percentile
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
1675 1676
1676 1677 class _faketr(object):
1677 1678 def add(s, x, y, z=None):
1678 1679 return None
1679 1680
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True):
    """time re-adding revisions [startrev, stoprev] of ``orig`` to a copy

    The copy (built by _temprevlog) is truncated at ``startrev``; each
    revision is then added back with data prepared per ``source`` (see
    perfrevlogwrite).  Returns a list of (rev, timing) pairs.  ``runidx``
    only labels the progress bar.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        for idx, rev in enumerate(revs):
            ui.progress(topic, idx, unit='revs', total=total)
            # prepare the data outside the timed section
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        ui.progress(topic, total, unit='revs', total=total)
        ui.progress(topic, None, unit='revs', total=total)
    return timings
1700 1701
def _getrevisionseed(orig, rev, tr, source):
    """build the (args, kwargs) for addrawrevision() of revision ``rev``

    Depending on ``source``, either the full text or a (baserev, delta)
    cachedelta is extracted from ``orig`` (see perfrevlogwrite for the
    meaning of each source value).
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        # feed the full revision text; no delta hint
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to p1 when there is no second parent
        parent = p2
        if p2 == nullid:
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # compute both parent deltas and keep the shorter one
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        # reuse the delta exactly as stored in the original revlog
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1739 1740
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """yield a temporary copy of revlog ``orig`` truncated at ``truncaterev``

    The index and data files are copied into a temporary directory,
    truncated so that revisions >= ``truncaterev`` are absent, and a new
    revlog is opened on the copy.  The directory is removed on exit.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # each index entry is fixed-size (orig._io.size bytes)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
1786 1787
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                # 'compengines' is the compression engine manager (it is
                # also what the else-branch below iterates); the previous
                # 'util.compressionengines' attribute does not exist and
                # raised AttributeError instead of the KeyError handled
                # here.
                util.compengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # No explicit engine list: use every available engine that
        # implements a revlog compressor.
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # Return a file handle on the file actually holding revision data
        # (the index file doubles as data storage for inline revlogs).
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect so docompress() below has real
        # chunk data to recompress.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1904 1905
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # With -c/-m the positional argument is the revision, not a file.
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        """Slice the raw segments in ``data`` into per-revision chunks.

        ``chain`` is a sequence of revision groups and ``data`` the raw
        segment bytes for each group, as produced by segmentforrevs().
        """
        # Bind methods/attributes to locals: this is a measured hot loop.
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # Inline revlogs interleave index entries with data.
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        # NOTE(review): the ``chain`` argument is unused; the closure reads
        # ``slicedchain`` (defined below) instead.
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # "historical portability": slicechunk moved between modules.
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # Precompute the inputs of each phase once, outside the timers.
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2040 2041
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building
    volatile revision set caches on the revset execution. Volatile caches
    hold filtered and obsolete related data."""
    # NOTE: the docstring previously said "--clean", which is not a
    # registered option; the flag declared above is "--clear".
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            # Force changectx creation for every revision.
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass
    timer(d)
    fm.end()
2063 2064
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def makebench(compute, target):
        """Build a benchmark callable running ``compute(repo, target)``
        after dropping the volatile sets (and, on request, the obsstore
        file cache)."""
        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            compute(repo, target)
        return run

    def selected(candidates):
        """Restrict ``candidates`` to the names given on the command line,
        if any."""
        if names:
            return [entry for entry in candidates if entry in names]
        return candidates

    # obsolescence-related sets
    for setname in selected(sorted(obsolete.cachefuncs)):
        timer(makebench(obsolete.getrevs, setname), title=setname)

    # filtering-related sets
    for filtername in selected(sorted(repoview.filtertable)):
        timer(makebench(repoview.filterrevs, filtername), title=filtername)
    fm.end()
2105 2106
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
          ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every filter's cache so the full build is measured
                view._branchcaches.clear()
            else:
                # drop only this filter's cache; subsets stay warm
                view._branchcaches.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    # Repeatedly pick a filter whose subset is not pending, so that
    # 'allfilters' ends up ordered from smaller subsets to bigger ones.
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            # only reachable if the subset relation contains a cycle
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # Disable on-disk branchmap reads and writes while timing, so only the
    # in-memory computation is measured; restored in the finally block.
    branchcacheread = safeattrsetter(branchmap, b'read')
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcacheread.set(lambda repo: None)
    branchcachewrite.set(lambda bc, repo: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2174 2175
@command(b'perfbranchmapload', [
    (b'f', b'filter', b'', b'Specify repoview filter'),
    (b'', b'list', False, b'List branchmap filter caches'),
    ] + formatteropts)
def perfbranchmapread(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap

    With --list, only enumerate the on-disk branchmap cache files and their
    sizes instead of timing anything.
    """
    # NOTE: fixed "brachmap" -> "branchmap" typos in the option help and
    # the abort message below.
    opts = _byteskwargs(opts)

    if list:
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                # cache files are named 'branch2' or 'branch2-<filter>'
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if filter:
        repo = repoview.repoview(repo, filter)
    else:
        repo = repo.unfiltered()
    # try once without timer, the filter may not be cached
    if branchmap.read(repo) is None:
        raise error.Abort(b'No branchmap cached for %s repo'
                          % (filter or b'unfiltered'))
    timer, fm = gettimer(ui, opts)
    timer(lambda: branchmap.read(repo) and None)
    fm.end()
2201 2202
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    storevfs = getsvfs(repo)

    def countmarkers():
        # instantiating the obsstore parses all on-disk markers
        return len(obsolete.obsstore(storevfs))

    timer(countmarkers)
    fm.end()
2211 2212
@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
     (b'', b'mincost', 0, b'smallest cost of items in cache'),
     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
     (b'', b'size', 4, b'size of cache'),
     (b'', b'gets', 10000, b'number of key lookups'),
     (b'', b'sets', 10000, b'number of key sets'),
     (b'', b'mixed', 10000, b'number of mixed mode operations'),
     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    """Benchmark util.lrucachedict operations.

    Times cache construction, gets, inserts/sets and a mixed workload,
    either with or without a total cost limit (--costlimit).
    """
    opts = _byteskwargs(opts)

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Costs for the pre-filled values used by dogetscost(). This list must
    # be as long as 'values': the previous code indexed the 'sets'-sized
    # 'costs' list there and raised IndexError whenever --size exceeded
    # --sets (with --costlimit set).
    valuecosts = [random.choice(costrange) for _ in _xrange(size)]

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=valuecosts[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                # the cost limit may have evicted the key
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0  # get
        else:
            op = 1  # set

        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2342 2343
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def write():
        # repeat a fixed line many times so only the output path is timed
        for _round in range(100000):
            ui.write(b'Testing write performance\n')

    timer(write)
    fm.end()
2355 2356
def uisetup(ui):
    """Extension setup hook.

    On old Mercurial versions (cmdutil.openrevlog exists but
    commands.debugrevlogopts does not), wrap openrevlog() so that the
    unsupported '--dir' option fails with a clear message instead of being
    silently ignored.
    """
    if (util.safehasattr(cmdutil, b'openrevlog') and
        not util.safehasattr(commands, b'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            # repos with 'dirlog' support tree manifests, hence '--dir'
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(b"This version doesn't support --dir option",
                                  hint=b"use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
General Comments 0
You need to be logged in to leave comments. Login now