##// END OF EJS Templates
perf: make `clearfilecache` helper work with any object...
Boris Feld -
r40719:d7936a9d default
parent child Browse files
Show More
@@ -1,2370 +1,2372 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance'''
3 3
4 4 # "historical portability" policy of perf.py:
5 5 #
6 6 # We have to do:
7 7 # - make perf.py "loadable" with as wide Mercurial version as possible
8 8 # This doesn't mean that perf commands work correctly with that Mercurial.
9 9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
10 10 # - make historical perf command work correctly with as wide Mercurial
11 11 # version as possible
12 12 #
13 13 # We have to do, if possible with reasonable cost:
14 14 # - make recent perf command for historical feature work correctly
15 15 # with early Mercurial
16 16 #
17 17 # We don't have to do:
18 18 # - make perf command for recent feature work correctly with early
19 19 # Mercurial
20 20
21 21 from __future__ import absolute_import
22 22 import contextlib
23 23 import functools
24 24 import gc
25 25 import os
26 26 import random
27 27 import shutil
28 28 import struct
29 29 import sys
30 30 import tempfile
31 31 import threading
32 32 import time
33 33 from mercurial import (
34 34 changegroup,
35 35 cmdutil,
36 36 commands,
37 37 copies,
38 38 error,
39 39 extensions,
40 40 mdiff,
41 41 merge,
42 42 revlog,
43 43 util,
44 44 )
45 45
46 46 # for "historical portability":
47 47 # try to import modules separately (in dict order), and ignore
48 48 # failure, because these aren't available with early Mercurial
49 49 try:
50 50 from mercurial import branchmap # since 2.5 (or bcee63733aad)
51 51 except ImportError:
52 52 pass
53 53 try:
54 54 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
55 55 except ImportError:
56 56 pass
57 57 try:
58 58 from mercurial import registrar # since 3.7 (or 37d50250b696)
59 59 dir(registrar) # forcibly load it
60 60 except ImportError:
61 61 registrar = None
62 62 try:
63 63 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
64 64 except ImportError:
65 65 pass
66 66 try:
67 67 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
68 68 except ImportError:
69 69 pass
70 70
def identity(a):
    """Return *a* unchanged (no-op stand-in for missing pycompat helpers)."""
    return a
73 73
74 74 try:
75 75 from mercurial import pycompat
76 76 getargspec = pycompat.getargspec # added to module after 4.5
77 77 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
78 78 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
79 79 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
80 80 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
81 81 if pycompat.ispy3:
82 82 _maxint = sys.maxsize # per py3 docs for replacing maxint
83 83 else:
84 84 _maxint = sys.maxint
85 85 except (ImportError, AttributeError):
86 86 import inspect
87 87 getargspec = inspect.getargspec
88 88 _byteskwargs = identity
89 89 fsencode = identity # no py3 support
90 90 _maxint = sys.maxint # no py3 support
91 91 _sysstr = lambda x: x # no py3 support
92 92 _xrange = xrange
93 93
94 94 try:
95 95 # 4.7+
96 96 queue = pycompat.queue.Queue
97 97 except (AttributeError, ImportError):
98 98 # <4.7.
99 99 try:
100 100 queue = pycompat.queue
101 101 except (AttributeError, ImportError):
102 102 queue = util.queue
103 103
104 104 try:
105 105 from mercurial import logcmdutil
106 106 makelogtemplater = logcmdutil.maketemplater
107 107 except (AttributeError, ImportError):
108 108 try:
109 109 makelogtemplater = cmdutil.makelogtemplater
110 110 except (AttributeError, ImportError):
111 111 makelogtemplater = None
112 112
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # unique sentinel distinguishing "missing" from None
def safehasattr(thing, attr):
    # getattr with a sentinel works for both bytes and str attribute
    # names (attr is converted through _sysstr first)
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)
120 120
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == 'nt':
    # os.name is a native str on both Python 2 and 3; comparing it
    # against a bytes literal (b'nt') is always False on Python 3 and
    # would silently select time.time on Windows
    util.timer = time.clock
else:
    util.timer = time.time
130 130
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
                         (b'c', b'changelog', False, (b'open changelog')),
                         (b'm', b'manifest', False, (b'open manifest')),
                         (b'', b'dir', False, (b'open directory manifest')),
                     ]))
150 150
151 151 cmdtable = {}
152 152
153 153 # for "historical portability":
154 154 # define parsealiases locally, because cmdutil.parsealiases has been
155 155 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Return the list of names in a b"name|alias1|alias2" declaration."""
    return cmd.split(b"|")
158 158
# pick the most capable @command decorator this Mercurial provides
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            # register directly into cmdtable, mimicking the modern API
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
186 186
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    # declare the experimental perf.* knobs; dynamicdefault lets each
    # reader supply its own fallback value
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    # config registration is optional on older Mercurial versions
    pass
206 206
def getlen(ui):
    """Return the length function used when sizing benchmark results.

    With the experimental perf.stub knob set, every collection counts
    as one item so test output stays stable; otherwise plain len().
    """
    stubbed = ui.configbool(b"perf", b"stub", False)
    return (lambda x: 1) if stubbed else len
211 211
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                # hex rendering width follows the debug flag, like the
                # real formatters
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                # deliberately falsy, matching plainformatter
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)
    return functools.partial(_timer, fm, displayall=displayall), fm
277 277
def stub_timer(fm, func, setup=None, title=None):
    """Run 'func' exactly once without timing it (perf.stub mode).

    'setup', when provided, is invoked before 'func' so stub runs
    exercise the same code path as real timed runs through _timer();
    previously it was silently ignored, leaving benchmarks that rely on
    per-run setup (e.g. cache clearing) running against unprepared
    state.  'fm' and 'title' are accepted only for signature
    compatibility with _timer().
    """
    if setup is not None:
        setup()
    func()
280 280
@contextlib.contextmanager
def timeone():
    # Measure a single run: yields a one-element-to-be list that, after
    # the with-block exits, receives a (wall, user, sys) elapsed tuple.
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    # os.times() starts with (user, system, ...); report the deltas
    # alongside the wall-clock delta
    r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
291 291
def _timer(fm, func, setup=None, title=None, displayall=False):
    # Repeatedly run 'func' (calling 'setup' before each run) until
    # enough samples are collected, then report them through 'fm'.
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    while True:
        if setup is not None:
            setup()
        with timeone() as item:
            r = func()
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # stop once we have spent >3s and collected >=100 samples, or
        # spent >10s with at least 3 samples -- bounds total runtime
        # while keeping enough samples to be meaningful
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break

    # r is the last run's return value; shown as "result" when truthy
    formatone(fm, results, title=title, result=r,
              displayall=displayall)
312 312
def formatone(fm, timings, title=None, result=None, displayall=False):
    # Render one benchmark's samples through formatter 'fm': always the
    # best (fastest) run, plus max/avg/median when 'displayall' is set.

    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)
    def display(role, entry):
        # entry is a (wall, user, sys) tuple; non-"best" roles get a
        # "role." prefix on their field names
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')
    # sorting makes timings[0] the fastest run (tuples compare by wall
    # time first)
    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
344 344
345 345 # utilities for historical portability
346 346
def getint(ui, section, name, default):
    """Read config section.name as an integer, or 'default' when unset.

    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    """
    value = ui.config(section, name, None)
    if value is None:
        return default
    try:
        return int(value)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, value))
358 358
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    # capture the current value so restore() can undo any set()
    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        # tiny closure-based handle over the captured attribute
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
388 388
389 389 # utilities to examine each internal API changes
390 390
def getbranchmapsubsettable():
    """Return the branch-subset table wherever this Mercurial defines it."""
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    for mod in (branchmap, repoview):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
406 406
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf);
    # older repos expose the equivalent "sopener" attribute instead
    svfs = getattr(repo, 'svfs', None)
    return svfs if svfs else getattr(repo, 'sopener')
417 417
def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf);
    # older repos expose the equivalent "opener" attribute instead
    vfs = getattr(repo, 'vfs', None)
    return vfs if vfs else getattr(repo, 'opener')
428 428
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    # older APIs: these attributes can simply be reset to None
    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
457 457
458 458 # utilities to clear cache
459 459
def clearfilecache(obj, attrname):
    """Forcibly drop the filecache entry 'attrname' from 'obj'.

    'obj' may be a repository -- in which case its unfiltered view is
    used, since that is where filecached properties live -- or any
    other object carrying a '_filecache' map.  (The diff artifact
    retained the superseded repo-only variant alongside this
    generalized one; only the generalized version is kept.)
    """
    # repositories expose unfiltered(); operate on that view when present
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    # drop the materialized value (if any), then forget the cache entry
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
465 467
466 468 # perf commands
467 469
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the working directory matching PATS"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    # list() forces the full walk; len() keeps the result alive cheaply
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()
476 478
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file F at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()
484 486
@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    """benchmark computing the working directory status"""
    opts = _byteskwargs(opts)
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
496 498
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the working directory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # read the original value before entering the try block: if this
    # assignment were inside "try" and failed, the "finally" clause
    # would raise NameError on "oldquiet" and mask the real error
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
510 512
def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # pre-clearcaches() revlogs: reset the node->rev lookup caches
        # by hand to their initial state
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
519 521
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark computing the changelog head revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        len(cl.headrevs())
        # clear lookup caches so each run recomputes from scratch
        clearcaches(cl)
    timer(d)
    fm.end()
530 532
@command(b'perftags', formatteropts)
def perftags(ui, repo, **opts):
    """benchmark computing repository tags"""
    import mercurial.changelog
    import mercurial.manifest

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    svfs = getsvfs(repo)
    repocleartagscache = repocleartagscachefunc(repo)
    def s():
        # rebuild changelog/manifestlog and drop the tags cache so the
        # timed run pays the full cost of tags computation
        repo.changelog = mercurial.changelog.changelog(svfs)
        rootmanifest = mercurial.manifest.manifestrevlog(svfs)
        repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo,
                                                          rootmanifest)
        repocleartagscache()
    def t():
        return len(repo.tags())
    timer(t, setup=s)
    fm.end()
550 552
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating all ancestors of the changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        for a in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()
561 563
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests of REVSET against the heads' ancestors"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            # membership test only; the result is deliberately discarded
            rev in s
    timer(d)
    fm.end()
574 576
@command(b'perfbookmarks', formatteropts)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def s():
        # drop the cached property so each run re-parses from disk
        clearfilecache(repo, b'_bookmarks')
    def d():
        repo._bookmarks
    timer(d, setup=s)
    fm.end()
587 589
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # helper factories: each returns a zero-argument callable suitable
    # for timer(); the bundle is reopened for every run

    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # raw file reads, bypassing bundle parsing entirely
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # sniff the bundle type once to decide which benchmarks apply
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
705 707
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'version', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, version=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(version, repo)

    def d():
        # drain the chunk generator; the chunks themselves are discarded
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
736 738
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark computing the dirstate's directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # touch the dirstate once up front so timed runs skip first-use costs
    b'a' in dirstate
    def d():
        dirstate.hasdir(b'a')
        # drop the cached dirs structure so each run rebuilds it
        del dirstate._map._dirs
    timer(d)
    fm.end()
748 750
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark loading the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # touch the dirstate once up front so timed runs skip first-use costs
    b"a" in repo.dirstate
    def d():
        # invalidate() forces the membership test below to reload
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(d)
    fm.end()
759 761
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark dirstate hasdir() with a cold directory cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # touch the dirstate once up front so timed runs skip first-use costs
    b"a" in repo.dirstate
    def d():
        repo.dirstate.hasdir(b"a")
        # drop the cached dirs structure so each run rebuilds it
        del repo.dirstate._map._dirs
    timer(d)
    fm.end()
770 772
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark building the dirstate file fold map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # touch the dirstate once up front so timed runs skip first-use costs
    b'a' in dirstate
    def d():
        dirstate._map.filefoldmap.get(b'a')
        # drop the cached map so each run rebuilds it
        del dirstate._map.filefoldmap
    timer(d)
    fm.end()
782 784
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark building the dirstate directory fold map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # touch the dirstate once up front so timed runs skip first-use costs
    b'a' in dirstate
    def d():
        dirstate._map.dirfoldmap.get(b'a')
        # drop both cached structures so each run rebuilds them
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(d)
    fm.end()
795 797
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark writing the dirstate back to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # touch the dirstate once up front so timed runs skip first-use costs
    b"a" in ds
    def d():
        # mark dirty so write() actually serializes
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()
807 809
@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    """benchmark merge.calculateupdates between the working context and REV"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()
826 828
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark copies.pathcopies between REV1 and REV2"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def d():
        copies.pathcopies(ctx1, ctx2)
    timer(d)
    fm.end()
837 839
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
          ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def d():
        phases = _phases
        if full:
            # also measure re-reading the phase roots file from disk
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()
856 858
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = (path.branch, opts.get(b'branch') or [])
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    # NOTE(review): iteritems() is Python 2 only -- confirm whether this
    # command is expected to work under Python 3
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
914 916
@command(b'perfmanifest',[
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset; derive its manifest node.
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # Full 40-char hex node given directly.
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # getstorage is the modern manifestlog API; fall back to the
                # private _revlog attribute on older Mercurial.
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        # Drop in-memory (and optionally on-disk) caches so each run
        # measures a cold read.
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
950 952
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark parsing a single changeset entry from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()
    def d():
        repo.changelog.read(n)
        #repo.changelog._cache = None
    timer(d)
    fm.end()
961 963
@command(b'perfindex', formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark instantiating the changelog revlog and resolving tip

    Each timed run constructs a fresh revlog object, so index parsing is
    included in the measurement.
    """
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = repo[b"tip"].node()
    svfs = getsvfs(repo)
    def d():
        cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
        cl.rev(n)
    timer(d)
    fm.end()
975 977
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark hg executable startup time by spawning `hg version -q`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        if os.name != r'nt':
            # Blank HGRCPATH so per-user config does not skew startup time.
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
        else:
            # Windows has no inline env-var syntax; set it for the process.
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
    timer(d)
    fm.end()
989 991
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark resolving the parents of a series of changelog nodes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    # Unfilter so hidden commits do not make node(i) raise.
    repo = repo.unfiltered()
    nl = [repo.changelog.node(i) for i in _xrange(count)]
    def d():
        for n in nl:
            repo.changelog.parents(n)
    timer(d)
    fm.end()
1006 1008
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of changeset X via its context"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    def d():
        len(repo[x].files())
    timer(d)
    fm.end()
1016 1018
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the file list of changeset X straight from the
    changelog (entry 3 of the parsed changelog tuple)"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        len(cl.read(x)[3])
    timer(d)
    fm.end()
1027 1029
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision identifier via repo.lookup"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()
1034 1036
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
         ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark replaying a reproducible random series of linelog edits"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # Fixed seed: the same edit sequence is generated on every invocation,
    # so timings are comparable across runs.
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        # Pick a hunk [a1, a2) of the current text and replace it with a
        # hunk [b1, b2) of the "new" revision, tracking the resulting size.
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1068 1070
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark parsing and resolving the given revision specs"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()
1076 1078
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark looking up a node in the changelog index, clearing the
    revlog caches between each lookup so every run starts cold"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(n)
        clearcaches(cl)
    timer(d)
    fm.end()
1090 1092
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running `hg log`, optionally following renames"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev=[]
    timer, fm = gettimer(ui, opts)
    # Buffer the output so terminal I/O does not dominate the measurement.
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
                               copies=opts.get(b'rename')))
    ui.popbuffer()
    fm.end()
1104 1106
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            ctx.branch() # read changelog data (in addition to the index)
    timer(moonwalk)
    fm.end()
1119 1121
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    # makelogtemplater is only resolved on Mercurial >= 4.3 (set up
    # elsewhere in this file for historical portability).
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # Render into a ui that writes to /dev/null so output I/O is not timed.
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1153 1155
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor over the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
    fm.end()
1160 1162
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    def d():
        s.fncache._load()
    timer(d)
    fm.end()
1170 1172
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache inside a (backed-up) transaction"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')
    def d():
        # Force the dirty flag each run, otherwise only the first write
        # would actually touch disk.
        s.fncache._dirty = True
        s.fncache.write(tr)
    timer(d)
    tr.close()
    lock.release()
    fm.end()
1187 1189
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark store-encoding every path currently in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    s.fncache._load()
    def d():
        for p in s.fncache.entries:
            s.encode(p)
    timer(d)
    fm.end()
1199 1201
def _bdiffworker(q, blocks, xdiff, ready, done):
    """Worker loop for the threaded mode of `perfbdiff`.

    Pulls (text1, text2) pairs from queue `q` and diffs them until a None
    sentinel is seen, then parks on the `ready` condition until the main
    thread wakes all workers for the next timing run (or sets `done` to
    shut them down).  `blocks`/`xdiff` select which diff routine is timed.
    """
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()
1215 1217
def _manifestrevision(repo, mnode):
    """Return the stored text of manifest node `mnode`.

    Uses the modern manifestlog.getstorage() API when present and falls
    back to the private _revlog attribute on older Mercurial (historical
    portability).
    """
    ml = repo.manifestlog

    if util.safehasattr(ml, b'getstorage'):
        store = ml.getstorage(b'')
    else:
        store = ml._revlog

    return store.revision(mnode)
1225 1227
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    # With -c/-m the positional FILE argument is actually the revision.
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # Collect all (old, new) text pairs up front so only diffing is timed.
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        # Threaded mode: start the workers, then hand them work for each
        # timing run via the queue; see _bdiffworker for the protocol.
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    # Shut the worker threads down once timing is complete.
    if withthreads:
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1326 1328
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    # With -c/-m the positional FILE argument is actually the revision.
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # Collect all (left, right) text pairs up front so only the diff
    # computation is timed.
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1392 1394
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Map each single-letter diff flag to the commands.diff keyword it
    # enables; each combination below gets its own timing entry.
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = dict((options[c], b'1') for c in diffopt)
        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()
        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()
1414 1416
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # First 4 bytes: flags (high 16 bits) + revlog format version (low 16).
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    # Sample nodes spread across the revlog so lookups exercise different
    # positions in the index.
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1532 1534
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # Negative startrev counts from the end, like Python indexing.
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in _xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1574 1576
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          # was a copy-paste of --stoprev's help; this option is the number
          # of timing passes performed below.
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
         ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    The write is repeated ``--count`` times and per-revision statistics
    (percentiles over the runs) are reported.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # Negative revisions count from the end, like Python indexing.
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    # After _byteskwargs the option dict uses bytes keys; str keys would
    # KeyError on Python 3.
    lazydeltabase = opts[b'lazydeltabase']
    source = opts[b'source']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort(b'invalid source type: %s' % source)

    ### actually gather results
    count = opts[b'count']
    if count <= 0:
        raise error.Abort(b'invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase)
        allresults.append(timing)

    ### consolidate the results in a single list
    # results[i] is (rev, [timing-of-run-1, timing-of-run-2, ...])
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts[b'details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        ("50%", resultcount * 50 // 100),  # was `* 70`, a copy-paste bug
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
        )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
1676 1678
1677 1679 class _faketr(object):
1678 1680 def add(s, x, y, z=None):
1679 1681 return None
1680 1682
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True):
    """Rewrite revisions [startrev, stoprev] of revlog `orig` into a
    temporary copy, timing each `addrawrevision` call.

    `source` selects how the revision data is fed (see perfrevlogwrite);
    `runidx` is only used to label the progress bar.  Returns a list of
    (rev, timing) tuples, one per rewritten revision.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        for idx, rev in enumerate(revs):
            ui.progress(topic, idx, unit='revs', total=total)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        # Show 100% completion, then clear the progress bar.
        ui.progress(topic, total, unit='revs', total=total)
        ui.progress(topic, None, unit='revs', total=total)
    return timings
1701 1703
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) for `addrawrevision` that re-adds `rev`
    of revlog `orig`, feeding data in the way selected by `source`.

    Depending on `source`, either a full text or a (baserev, delta)
    cachedelta is supplied (see perfrevlogwrite for the source values).
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # Delta against p2, falling back to p1 when there is no p2.
        parent = p2
        if p2 == nullid:
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # Compute both parent deltas and keep the smaller one.
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        # Reuse the delta base the revlog actually stored.
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1740 1742
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Context manager yielding a writable copy of revlog `orig`,
    truncated so that revisions >= `truncaterev` are absent and can be
    re-added.  The copy lives in a temporary directory that is removed
    on exit.  Inline revlogs are not supported.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # Each index entry is fixed-size; cut after `truncaterev` entries.
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        # Drop references before the temporary directory is removed.
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
1787 1789
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                # NOTE(review): validation uses util.compressionengines while
                # the enumeration below uses util.compengines -- confirm both
                # names exist on the supported Mercurial versions.
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # No explicit selection: benchmark every available engine that
        # actually provides a working revlog compressor.
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # Return a raw file handle on the revlog data; inline revlogs keep
        # their data interleaved in the index file.
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        # one segment request per revision, fresh file handle each time
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # same as doread() but reusing a single file descriptor
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # a single segment request spanning all revisions
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        # read and decompress each revision's chunk individually
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    # The compression benchmarks rely on chunks[0], populated as a side
    # effect of the b'chunk batch' run above.
    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1905 1907
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the positional argument is actually the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # Slice each read segment back into per-revision compressed chunks.
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            # NOTE(review): this loop variable shadows the enclosing `rev`;
            # harmless here since the outer value is not used afterwards.
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with the data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        # NOTE(review): the `chain` parameter is unused; the closure reads
        # `slicedchain` from the enclosing scope instead.
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        # older module layout, before the revlogutils split
        slicechunk = getattr(revlog, '_slicechunk', None)

    # Precompute the inputs of every phase so each timer measures only
    # that phase's work.
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2041 2043
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building the
    volatile revisions set cache on the revset execution. Volatile cache holds
    the filtered and obsolete related caches."""
    # The docstring above previously referred to a nonexistent --clean option;
    # the declared flag is -C/--clear.
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def d():
        if clear:
            # drop filtered/obsolete caches so every run pays the rebuild cost
            repo.invalidatevolatilesets()
        if contexts:
            # iterate to force actual changectx creation for each revision
            for ctx in repo.set(expr): pass
        else:
            for r in repo.revs(expr): pass
    timer(d)
    fm.end()
2064 2066
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()
    dropobsstore = opts[b'clear_obsstore']

    def makebench(compute, setname):
        """build a benchmark callable running `compute(repo, setname)` cold

        The volatile-set caches (and, on request, the obsstore file cache)
        are invalidated before every run.
        """
        def run():
            repo.invalidatevolatilesets()
            if dropobsstore:
                clearfilecache(repo, b'obsstore')
            compute(repo, setname)
        return run

    def selected(candidates):
        # sorted candidate names, restricted to the requested ones (if any)
        wanted = sorted(candidates)
        if names:
            wanted = [n for n in wanted if n in names]
        return wanted

    # obsolescence-related sets first, then repoview filters, both in
    # sorted-name order
    for setname in selected(obsolete.cachefuncs):
        timer(makebench(obsolete.getrevs, setname), title=setname)

    for setname in selected(repoview.filtertable):
        timer(makebench(repoview.filterrevs, setname), title=setname)
    fm.end()
2106 2108
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
          ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap so subsets are rebuilt too
                view._branchcaches.clear()
            else:
                # only drop the cache for this filter level
                view._branchcaches.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    # Schedule the filters so that each one comes after the subset it is
    # built upon; the inner for/else fires the assert on a dependency cycle.
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # Disable on-disk branchmap reading and writing so only the in-memory
    # computation is measured; restored in the finally block below.
    branchcacheread = safeattrsetter(branchmap, b'read')
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcacheread.set(lambda repo: None)
    branchcachewrite.set(lambda bc, repo: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2175 2177
@command(b'perfbranchmapload', [
         (b'f', b'filter', b'', b'Specify repoview filter'),
         (b'', b'list', False, b'List branchmap filter caches'),
         ] + formatteropts)
def perfbranchmapread(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    # Fixed user-facing typos: "brachmap" -> "branchmap" in the --list help
    # string above and in the abort message below.
    opts = _byteskwargs(opts)

    if list:
        # only enumerate the on-disk branchmap cache files and their sizes
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if filter:
        repo = repoview.repoview(repo, filter)
    else:
        repo = repo.unfiltered()
    # try once without timer, the filter may not be cached
    if branchmap.read(repo) is None:
        raise error.Abort(b'No branchmap cached for %s repo'
                          % (filter or b'unfiltered'))
    timer, fm = gettimer(ui, opts)
    timer(lambda: branchmap.read(repo) and None)
    fm.end()
2202 2204
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    storevfs = getsvfs(repo)

    def loadmarkers():
        # instantiating obsstore parses all markers from the store vfs
        return len(obsolete.obsstore(storevfs))

    timer(loadmarkers)
    fm.end()
2212 2214
@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
     (b'', b'mincost', 0, b'smallest cost of items in cache'),
     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
     (b'', b'size', 4, b'size of cache'),
     (b'', b'gets', 10000, b'number of key lookups'),
     (b'', b'sets', 10000, b'number of key sets'),
     (b'', b'mixed', 10000, b'number of mixed mode operations'),
     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    """benchmark util.lrucachedict under several usage patterns

    Measures init, pure-get, pure-insert/set and mixed workloads; when
    ``--costlimit`` is non-zero, the cost-aware variants are run instead.
    """
    opts = _byteskwargs(opts)

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        # Like dogets() but with a total-cost limit, so lookups may miss
        # once costly items have been evicted (hence the KeyError guard).
        # NOTE(review): this indexes `costs` (length `sets`) by positions in
        # `values` (length `size`); assumes size <= sets -- confirm.
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        # (operation, key, cost); keys range over twice the cache size so
        # both hits and misses occur
        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # With a cost limit, only the cost-aware variants are meaningful.
    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2343 2345
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def benchwrite():
        # emit the same line many times; each run performs identical work,
        # so timing differences reflect ui.write itself
        for _idx in range(100000):
            ui.write(b'Testing write performance\n')

    timer(benchwrite)
    fm.end()
2356 2358
def uisetup(ui):
    # Extension setup hook: install a compatibility shim when running on a
    # Mercurial old enough that cmdutil.openrevlog() exists but silently
    # ignores unsupported options.
    if (util.safehasattr(cmdutil, b'openrevlog') and
        not util.safehasattr(commands, b'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            # reject --dir explicitly instead of letting it be ignored
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(b"This version doesn't support --dir option",
                                  hint=b"use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
General Comments 0
You need to be logged in to leave comments. Login now