##// END OF EJS Templates
perf: add a `setup` argument to run code outside of the timed section...
Boris Feld -
r40716:9d88ae5c default
parent child Browse files
Show More
@@ -1,2365 +1,2367
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance'''
3 3
4 4 # "historical portability" policy of perf.py:
5 5 #
6 6 # We have to do:
7 7 # - make perf.py "loadable" with as wide Mercurial version as possible
8 8 # This doesn't mean that perf commands work correctly with that Mercurial.
9 9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
10 10 # - make historical perf command work correctly with as wide Mercurial
11 11 # version as possible
12 12 #
13 13 # We have to do, if possible with reasonable cost:
14 14 # - make recent perf command for historical feature work correctly
15 15 # with early Mercurial
16 16 #
17 17 # We don't have to do:
18 18 # - make perf command for recent feature work correctly with early
19 19 # Mercurial
20 20
21 21 from __future__ import absolute_import
22 22 import contextlib
23 23 import functools
24 24 import gc
25 25 import os
26 26 import random
27 27 import shutil
28 28 import struct
29 29 import sys
30 30 import tempfile
31 31 import threading
32 32 import time
33 33 from mercurial import (
34 34 changegroup,
35 35 cmdutil,
36 36 commands,
37 37 copies,
38 38 error,
39 39 extensions,
40 40 mdiff,
41 41 merge,
42 42 revlog,
43 43 util,
44 44 )
45 45
46 46 # for "historical portability":
47 47 # try to import modules separately (in dict order), and ignore
48 48 # failure, because these aren't available with early Mercurial
49 49 try:
50 50 from mercurial import branchmap # since 2.5 (or bcee63733aad)
51 51 except ImportError:
52 52 pass
53 53 try:
54 54 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
55 55 except ImportError:
56 56 pass
57 57 try:
58 58 from mercurial import registrar # since 3.7 (or 37d50250b696)
59 59 dir(registrar) # forcibly load it
60 60 except ImportError:
61 61 registrar = None
62 62 try:
63 63 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
64 64 except ImportError:
65 65 pass
66 66 try:
67 67 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
68 68 except ImportError:
69 69 pass
70 70
def identity(a):
    """Return *a* unchanged; no-op fallback when a pycompat helper is absent."""
    return a
73 73
74 74 try:
75 75 from mercurial import pycompat
76 76 getargspec = pycompat.getargspec # added to module after 4.5
77 77 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
78 78 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
79 79 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
80 80 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
81 81 if pycompat.ispy3:
82 82 _maxint = sys.maxsize # per py3 docs for replacing maxint
83 83 else:
84 84 _maxint = sys.maxint
85 85 except (ImportError, AttributeError):
86 86 import inspect
87 87 getargspec = inspect.getargspec
88 88 _byteskwargs = identity
89 89 fsencode = identity # no py3 support
90 90 _maxint = sys.maxint # no py3 support
91 91 _sysstr = lambda x: x # no py3 support
92 92 _xrange = xrange
93 93
94 94 try:
95 95 # 4.7+
96 96 queue = pycompat.queue.Queue
97 97 except (AttributeError, ImportError):
98 98 # <4.7.
99 99 try:
100 100 queue = pycompat.queue
101 101 except (AttributeError, ImportError):
102 102 queue = util.queue
103 103
104 104 try:
105 105 from mercurial import logcmdutil
106 106 makelogtemplater = logcmdutil.maketemplater
107 107 except (AttributeError, ImportError):
108 108 try:
109 109 makelogtemplater = cmdutil.makelogtemplater
110 110 except (AttributeError, ImportError):
111 111 makelogtemplater = None
112 112
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # unique sentinel: distinguishes "attribute missing" from any real value
def safehasattr(thing, attr):
    # attr arrives as bytes throughout this file; getattr needs a native str
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)
120 120
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == 'nt':
    # os.name is a native str on both py2 and py3; the previous
    # comparison against b'nt' could never match under py3, silently
    # selecting low-resolution time.time on Windows
    util.timer = time.clock
else:
    util.timer = time.time
130 130
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
# The fallback tuples mirror the modern definition of these options.
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
    (b'c', b'changelog', False, (b'open changelog')),
    (b'm', b'manifest', False, (b'open manifest')),
    (b'', b'dir', False, (b'open directory manifest')),
]))
150 150
cmdtable = {}  # filled in by the @command decorator defined below
152 152
# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a b"name|alias1|alias2" command spec into its list of names."""
    return list(cmd.split(b"|"))
158 158
# Pick a @command decorator appropriate for the running Mercurial,
# newest API first.
if safehasattr(registrar, 'command'):
    # modern API: registrar-based registration (3.7+)
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                # pre-3.1 way of flagging a command as repo-less
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
186 186
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    # Declare the experimental perf.* knobs so devel warnings stay quiet;
    # dynamicdefault keeps the real default at the point of reading.
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    # old Mercurial without the config registrar: unregistered options
    # still work, they just cannot be declared
    pass
206 206
def getlen(ui):
    """Return len(), or a constant-1 stub when perf.stub is enabled."""
    if not ui.configbool(b"perf", b"stub", False):
        return len
    return lambda sequence: 1
211 211
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands.

    The returned timer is called as timer(func, setup=None, title=None).
    """

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                # falsy, like a plain (non-templated) formatter
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)
    return functools.partial(_timer, fm, displayall=displayall), fm
277 277
def stub_timer(fm, func, setup=None, title=None):
    """One-shot replacement for _timer() used when perf.stub is set.

    Runs *setup* (when provided) and then *func* exactly once each,
    without timing anything. Previously the new *setup* argument was
    accepted but silently ignored, so stub runs skipped the per-run
    priming that _timer performs and could exercise *func* in a
    different state (or crash).
    """
    if setup is not None:
        setup()
    func()
280 280
@contextlib.contextmanager
def timeone():
    # Yields a one-element list; after the with-block exits it contains a
    # (wallclock, user-cpu, system-cpu) triple for the enclosed code.
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    # os.times() -> (utime, stime, ...); the deltas give the CPU cost
    r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
291 291
def _timer(fm, func, setup=None, title=None, displayall=False):
    """Repeatedly time func() and report the results through formatter *fm*.

    *setup*, when given, runs before every timed call and is excluded
    from the measurement.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    while True:
        if setup is not None:
            setup()
        with timeone() as item:
            r = func()
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # keep sampling until ~3s have elapsed (at least 100 runs);
        # hard stop after 10s as long as we have 3 samples
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break

    # r is the last call's return value, shown as "! result: ..."
    formatone(fm, results, title=title, result=r,
              displayall=displayall)
310 312
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Emit one benchmark's timings through formatter *fm*.

    *timings* is a list of (wall, user-cpu, sys-cpu) triples; only the
    best run is shown unless *displayall* also requests max/avg/median.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)
    def display(role, entry):
        # non-"best" rows get a "role." prefix on every field name
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')
    timings.sort()  # tuples sort by wall time first
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
342 344
343 345 # utilities for historical portability
344 346
def getint(ui, section, name, default):
    """Read an integer config value, returning *default* when unset.

    for "historical portability": ui.configint has only been available
    since 1.9 (or fa2b596db182), so parse the raw value by hand.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, raw))
356 358
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    # capture the current value so restore() can put it back later
    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
386 388
387 389 # utilities to examine each internal API changes
388 390
def getbranchmapsubsettable():
    """Return the branchmap 'subsettable' wherever this hg defines it."""
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    for mod in (branchmap, repoview):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
404 406
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf);
    # fall back to the legacy 'sopener' name on older versions
    return getattr(repo, 'svfs', None) or getattr(repo, 'sopener')
415 417
def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf);
    # fall back to the legacy 'opener' name on older versions
    return getattr(repo, 'vfs', None) or getattr(repo, 'opener')
426 428
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    # older attribute names, newest first
    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
455 457
456 458 # utilities to clear cache
457 459
def clearfilecache(repo, attrname):
    """Drop a @filecache'd value so the next access recomputes it."""
    unfiltered = repo.unfiltered()
    if attrname in vars(unfiltered):
        delattr(unfiltered, attrname)
    unfiltered._filecache.pop(attrname, None)
463 465
464 466 # perf commands
465 467
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the working directory matching *pats*"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    # list() forces the walk generator so the whole traversal is timed
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()
474 476
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file *f* at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()
482 484
@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    """benchmark repo.status(), optionally including unknown files"""
    opts = _byteskwargs(opts)
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    # status() returns several file lists; summing their lengths forces them
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
494 496
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the working directory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # capture before entering the try block: if the capture itself raised
    # inside the try, the finally clause would hit a NameError while
    # restoring an unassigned name, masking the real error
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True  # suppress per-file output during the runs
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
508 510
def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        # modern revlogs expose an explicit cache-clearing entry point
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # older revlogs: reset the node->rev cache to its pristine state
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
517 519
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark computing the changelog head revisions from scratch"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        len(cl.headrevs())
        clearcaches(cl)  # drop caches so every run pays the full cost
    timer(d)
    fm.end()
528 530
@command(b'perftags', formatteropts)
def perftags(ui, repo, **opts):
    """benchmark computing the repository's tags from scratch"""
    import mercurial.changelog
    import mercurial.manifest

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    svfs = getsvfs(repo)
    repocleartagscache = repocleartagscachefunc(repo)
    def t():
        # rebuild changelog/manifestlog so no revlog-level caching survives
        # between runs
        repo.changelog = mercurial.changelog.changelog(svfs)
        rootmanifest = mercurial.manifest.manifestrevlog(svfs)
        repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo,
                                                          rootmanifest)
        repocleartagscache()
        return len(repo.tags())
    timer(t)
    fm.end()
547 549
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark walking all ancestors of the current heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        # exhaust the lazy ancestor generator
        for a in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()
558 560
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark ancestor-membership tests for the revisions in REVSET"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s  # the lazy membership test is the operation timed
    timer(d)
    fm.end()
571 573
@command(b'perfbookmarks', formatteropts)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        clearfilecache(repo, b'_bookmarks')
        repo._bookmarks  # property access re-triggers the on-disk parse
    timer(d)
    fm.end()
582 584
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        # time fn() over a freshly opened and parsed bundle
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # time draining the bundle stream in `size`-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # raw file-I/O baseline with no bundle parsing at all
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # open once to sniff the bundle type and add format-specific benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
700 702
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'version', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, version=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(version, repo)

    def d():
        # generation is lazy; exhaust the chunk generator to do the work
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
731 733
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark building the dirstate's directory-membership structure"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # force the dirstate to be loaded
    def d():
        dirstate.hasdir(b'a')
        del dirstate._map._dirs  # drop the cached dir set for the next run
    timer(d)
    fm.end()
743 745
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark loading the dirstate from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # prime the first load outside the timing loop
    def d():
        repo.dirstate.invalidate()
        b"a" in repo.dirstate  # membership test re-triggers the parse
    timer(d)
    fm.end()
754 756
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark rebuilding the dirstate's directory set"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # force the dirstate to be loaded
    def d():
        repo.dirstate.hasdir(b"a")
        del repo.dirstate._map._dirs  # drop the cache so each run rebuilds it
    timer(d)
    fm.end()
765 767
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark rebuilding the dirstate's case-folding map for files"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # force the dirstate to be loaded
    def d():
        dirstate._map.filefoldmap.get(b'a')
        del dirstate._map.filefoldmap  # drop the cache for the next run
    timer(d)
    fm.end()
777 779
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark rebuilding the dirstate's case-folding map for directories"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # force the dirstate to be loaded
    def d():
        dirstate._map.dirfoldmap.get(b'a')
        # drop both caches: dirfoldmap is derived from _dirs
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(d)
    fm.end()
790 792
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark writing the dirstate back to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds  # force the dirstate to be loaded
    def d():
        ds._dirty = True  # mark dirty so write() actually serializes
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()
802 804
@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    """benchmark the merge-action calculation against REV"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()
821 823
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark computing copy tracing between two revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def d():
        copies.pathcopies(ctx1, ctx2)
    timer(d)
    fm.end()
832 834
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
          ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def d():
        phases = _phases
        if full:
            # with --full, also pay the cost of re-reading phase roots
            # from disk on every run
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()
851 853
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = (path.branch, opts.get(b'branch') or [])
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    # items() instead of the py2-only iteritems() so the command keeps
    # working under Python 3, where dict.iteritems() no longer exists
    for nhex, phase in remotephases.items():
        if nhex == b'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
909 911
@command(b'perfmanifest',[
    (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
    (b'', b'clear-disk', False, b'clear on-disk caches too'),
] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset: resolve its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full 40-char hex node given directly
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
945 947
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changeset entry from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()
    def readentry():
        repo.changelog.read(node)
        #repo.changelog._cache = None
    timer(readentry)
    fm.end()
956 958
@command(b'perfindex', formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark loading the changelog index and resolving the tip node"""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    tipnode = repo[b"tip"].node()
    svfs = getsvfs(repo)
    def loadandlookup():
        cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
        cl.rev(tipnode)
    timer(loadandlookup)
    fm.end()
970 972
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark running `hg version` in a child process"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def launch():
        if os.name != r'nt':
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
        else:
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
    timer(launch)
    fm.end()
984 986
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark resolving the parents of many changelog nodes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodes = [repo.changelog.node(i) for i in _xrange(count)]
    def resolveparents():
        for node in nodes:
            repo.changelog.parents(node)
    timer(resolveparents)
    fm.end()
1001 1003
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of a changectx"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    def listfiles():
        len(repo[rev].files())
    timer(listfiles)
    fm.end()
1011 1013
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw file list from a changelog entry"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def readfiles():
        # index 3 of the parsed changelog entry is the file list
        len(cl.read(rev)[3])
    timer(readfiles)
    fm.end()
1022 1024
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def lookup():
        return len(repo.lookup(rev))
    timer(lookup)
    fm.end()
1029 1031
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
         ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark replaying a pseudo-random sequence of linelog edits"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: every invocation replays the identical edit sequence
    random.seed(0)
    randint = random.randint
    currentlines = 0
    editargs = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        editargs.append((rev, a1, a2, b1, b2))

    def applyedits():
        ll = linelog.linelog()
        for args in editargs:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(applyedits)
    fm.end()
1063 1065
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    def resolve():
        return len(revrange(repo, specs))
    timer(resolve)
    fm.end()
1071 1073
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark looking a node up in an uncached changelog index"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    node = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def lookup():
        cl.rev(node)
        # wipe caches so the next run starts cold again
        clearcaches(cl)
    timer(lookup)
    fm.end()
1085 1087
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark producing `hg log` output (buffered and discarded)"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()
    def runlog():
        commands.log(ui, repo, rev=rev, date=b'', user=b'',
                     copies=opts.get(b'rename'))
    timer(runlog)
    ui.popbuffer()
    fm.end()
1099 1101
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def walkbackwards():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[rev]
            # reading the branch forces the changelog entry to be parsed,
            # not just the index row
            ctx.branch()
    timer(walkbackwards)
    fm.end()
1114 1116
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render into a ui whose output is thrown away, so only templating
    # itself is measured
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = list(scmutil.revrange(repo, opts.get(b'rev') or [b'all()']))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def rendereach():
        for rev in revs:
            ctx = repo[rev]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(rendereach)
    fm.end()
1148 1150
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def buildauditor():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)
    timer(buildauditor)
    fm.end()
1155 1157
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    def loadfncache():
        store.fncache._load()
    timer(loadfncache)
    fm.end()
1165 1167
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark rewriting the fncache file inside a transaction"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    lock = repo.lock()
    store.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')
    def writefncache():
        # mark dirty so write() really rewrites the file on every run
        store.fncache._dirty = True
        store.fncache.write(tr)
    timer(writefncache)
    tr.close()
    lock.release()
    fm.end()
1182 1184
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()
    def encodeall():
        for path in store.fncache.entries:
            store.encode(path)
    timer(encodeall)
    fm.end()
1194 1196
def _bdiffworker(q, blocks, xdiff, ready, done):
    # Worker loop for perfbdiff --threads.
    #
    # Pulls (text1, text2) pairs from queue ``q`` and diffs each one with
    # the algorithm selected by ``xdiff``/``blocks``.  A ``None`` item marks
    # the end of one timing batch; after draining it, the worker parks on
    # the ``ready`` condition until the driver wakes it for the next batch.
    # Setting the ``done`` event (plus one final wake-up) ends the loop.
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()
1210 1212
def _manifestrevision(repo, mnode):
    """Return the raw stored revision text of manifest node ``mnode``."""
    ml = repo.manifestlog

    # modern Mercurial exposes manifest storage through getstorage();
    # older versions only had the underlying revlog
    if util.safehasattr(ml, b'getstorage'):
        return ml.getstorage(b'').revision(mnode)
    return ml._revlog.revision(mnode)
1220 1222
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the positional argument is the revision, not a file
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # gather all (old, new) text pairs up front so only diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        # single-threaded: diff every pair inline
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        # multi-threaded: workers consume pairs from a queue; a None per
        # worker marks the end of a batch (see _bdiffworker)
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # tell workers to exit and wake them one last time
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1321 1323
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the positional argument is the revision, not a file
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # gather all (left, right) text pairs up front so only diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1387 1389
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    # time the diff under each combination of whitespace-ignoring flags
    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        diffargs = dict((options[c], b'1') for c in diffopt)
        def rundiff():
            ui.pushbuffer()
            commands.diff(ui, repo, **diffargs)
            ui.popbuffer()
        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(rundiff, title=title)
    fm.end()
1409 1411
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # first 4 bytes of a revlog index: flags (high 16 bits) + version
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    # sample nodes at several positions to exercise lookup at various depths
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    # each entry: (callable to time, benchmark title)
    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1527 1529
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start counts back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def readseries():
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for pos in _xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            node = rl.node(pos)
            rl.revision(node)

    timer, fm = gettimer(ui, opts)
    timer(readseries)
    fm.end()
1569 1571
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          (b'', b'count', 3, b'number of timed passes to perform'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
         ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # fix: message previously read "invalide run count"
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase)
        allresults.append(timing)

    ### consolidate the results in a single list
    # each entry becomes (rev, [timing-run-1, timing-run-2, ...])
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # fix: the median row was previously computed as "* 70 // 100"
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
1671 1673
1672 1674 class _faketr(object):
1673 1675 def add(s, x, y, z=None):
1674 1676 return None
1675 1677
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True):
    # Rewrite revisions [startrev, stoprev] of ``orig`` into a temporary
    # copy of the revlog, timing each addrawrevision call individually.
    # Returns a list of (rev, timing) tuples, one per rewritten revision.
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        for idx, rev in enumerate(revs):
            ui.progress(topic, idx, unit='revs', total=total)
            # build the arguments *outside* the timed section so only the
            # actual revlog write is measured
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        ui.progress(topic, total, unit='revs', total=total)
        ui.progress(topic, None, unit='revs', total=total)
    return timings
1696 1698
def _getrevisionseed(orig, rev, tr, source):
    # Compute the (args, kwargs) fed to addrawrevision for ``rev``.
    #
    # ``source`` selects what is handed over: the full text, a cached
    # delta against one of the parents, or the delta already stored in
    # ``orig`` (see perfrevlogwrite's docstring for the option values).
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to p1 when there is no second parent
        parent = p2
        if p2 == nullid:
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # compute the delta against each parent and keep the smaller one
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        # reuse the delta exactly as stored in the original revlog
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1735 1737
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    # Context manager yielding a writable copy of revlog ``orig``,
    # truncated so that revisions >= ``truncaterev`` are missing and can
    # be re-added.  The copy lives in a temporary directory that is
    # removed on exit.
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # each index entry is fixed-size, so this drops entries from
            # truncaterev onward
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
1782 1784
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # No explicit selection: benchmark every engine that is available
        # and can actually produce a working revlog compressor.
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # Open a fresh file handle on the file holding the chunk data:
        # the index file for inline revlogs, the data file otherwise.
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        # One segment read per revision, no explicit file handle reuse.
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # Same as doread(), but reusing a single file handle.
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # A single segment read spanning all revisions at once.
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        # Batch read with an explicitly reused file handle.
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        # Read + decompress each revision's chunk individually.
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    # Holder for the decompressed chunks produced by dochunkbatch(); the
    # compression benchmarks below reuse them as input.
    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    # NOTE: the b'chunk batch' entry must run before the compression
    # benchmarks appended below, since it populates chunks[0].
    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1900 1902
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # With -c/-m there is no FILE argument, so the first positional
    # argument is actually the revision.
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        """Extract the per-revision compressed chunks from the already-read
        segments (one segment of raw bytes per slice of the delta chain)."""
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave an index entry before each
                    # data chunk, so skip over them
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        # NOTE: the ``chain`` argument is unused; the precomputed
        # ``slicedchain`` is read instead.
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # for "historical portability": slicechunk moved into
    # mercurial.revlogutils.deltas; fall back to the old private helper.
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # Precompute the input of each phase once, so every benchmark below
    # measures only its own step.
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    # slicing is only meaningful when sparse-read is enabled
    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2036 2038
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building
    the volatile revision set caches on the revset execution. Volatile
    caches hold filtered and obsolete related caches."""
    # Fix: the docstring used to reference a nonexistent ``--clean``
    # option; the flag declared above is ``-C/--clear``.
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def d():
        if clear:
            # drop volatile sets so they are recomputed on each run
            repo.invalidatevolatilesets()
        if contexts:
            # materialize a changectx per revision (heavier than bare revs)
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass
    timer(d)
    fm.end()
2059 2061
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def makebench(compute, target):
        """build a benchmark closure invalidating caches before ``compute``"""
        def run():
            # start from scratch on every call; optionally drop the
            # obsstore filecache as well
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            compute(repo, target)
        return run

    def wanted(candidates):
        """sorted candidate names, restricted to *names when given"""
        ordered = sorted(candidates)
        if names:
            ordered = [c for c in ordered if c in names]
        return ordered

    # obsolescence-related sets first...
    for setname in wanted(obsolete.cachefuncs):
        timer(makebench(obsolete.getrevs, setname), title=setname)

    # ...then the repoview filters
    for filtername in wanted(repoview.filtertable):
        timer(makebench(repoview.filterrevs, filtername), title=filtername)
    fm.end()
2101 2103
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
         ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop all cached branchmaps so subsets are rebuilt too
                view._branchcaches.clear()
            else:
                # drop only this view's branchmap; it is rebuilt on top of
                # the (pre-warmed) subset's cache
                view._branchcaches.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    # topological-ish ordering: repeatedly pick a filter whose subset is
    # not itself still pending
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # Disable on-disk branchmap I/O so that only the in-memory update is
    # timed; restored in the finally block below.
    branchcacheread = safeattrsetter(branchmap, b'read')
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcacheread.set(lambda repo: None)
    branchcachewrite.set(lambda bc, repo: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2170 2172
@command(b'perfbranchmapload', [
     (b'f', b'filter', b'', b'Specify repoview filter'),
     (b'', b'list', False, b'List branchmap filter caches'),
    ] + formatteropts)
def perfbranchmapread(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap

    With --list, only enumerate the on-disk branchmap cache files and
    their sizes instead of benchmarking.
    """
    # Fix: user-facing strings spelled "brachmap" instead of "branchmap"
    # (both the --list help text above and the abort message below).
    opts = _byteskwargs(opts)

    if list:
        # branchmap caches are stored as 'branch2[-<filtername>]'
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if filter:
        repo = repoview.repoview(repo, filter)
    else:
        repo = repo.unfiltered()
    # try once without timer, the filter may not be cached
    if branchmap.read(repo) is None:
        raise error.Abort(b'No branchmap cached for %s repo'
                          % (filter or b'unfiltered'))
    timer, fm = gettimer(ui, opts)
    # ``and None`` discards the branchmap so the formatter only sees timing
    timer(lambda: branchmap.read(repo) and None)
    fm.end()
2197 2199
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)

    def loadmarkers():
        # instantiating the obsstore parses every marker from disk; report
        # the marker count so runs stay comparable
        return len(obsolete.obsstore(svfs))

    timer(loadmarkers)
    fm.end()
2207 2209
@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
     (b'', b'mincost', 0, b'smallest cost of items in cache'),
     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
     (b'', b'size', 4, b'size of cache'),
     (b'', b'gets', 10000, b'number of key lookups'),
     (b'', b'sets', 10000, b'number of key sets'),
     (b'', b'mixed', 10000, b'number of mixed mode operations'),
     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    """Benchmark ``util.lrucachedict`` operations.

    Measures init, get, insert/set and mixed workloads; when --costlimit
    is non-zero, the cost-aware variants of the cache are benchmarked
    instead of the plain ones.
    """
    opts = _byteskwargs(opts)

    def doinit():
        # raw construction cost of the cache object
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    # one random value per cache slot
    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        # NOTE(review): closes over ``costs``, which is only assigned
        # further down; this works because the list exists before any
        # benchmark runs, but costs[i] for i < size assumes size <= sets
        # — an IndexError lurks otherwise. TODO confirm intent.
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                # cost-based eviction may have dropped the key
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        # like doinserts() but via __setitem__ instead of insert()
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0          # get
        else:
            op = 1          # set

        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # cost-aware and plain variants are mutually exclusive
    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2338 2340
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def bench():
        # write the same line many times to expose per-call overhead
        for _ in range(100000):
            ui.write(b'Testing write performance\n')

    timer(bench)
    fm.end()
2351 2353
def uisetup(ui):
    """Install compatibility wrappers for old Mercurial versions."""
    # for "historical portability":
    # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
    # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
    # openrevlog() should cause failure, because it has been
    # available since 3.5 (or 49c583ca48c4).
    hasopenrevlog = util.safehasattr(cmdutil, b'openrevlog')
    hasdebugrevlogopts = util.safehasattr(commands, b'debugrevlogopts')
    if not hasopenrevlog or hasdebugrevlogopts:
        return

    def wrappedopenrevlog(orig, repo, cmd, file_, opts):
        # reject --dir explicitly instead of letting it be ignored
        if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
            raise error.Abort(b"This version doesn't support --dir option",
                              hint=b"use 3.5 or later")
        return orig(repo, cmd, file_, opts)

    extensions.wrapfunction(cmdutil, b'openrevlog', wrappedopenrevlog)
General Comments 0
You need to be logged in to leave comments. Login now