debug: allow specifying a manifest node rather than a revision
Martijn Pieters - r38802:ddb15a83 (default)
@@ -1,1827 +1,1833 @@
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance'''
3 3
4 4 # "historical portability" policy of perf.py:
5 5 #
6 6 # We have to do:
7 7 # - make perf.py "loadable" with as wide Mercurial version as possible
8 8 # This doesn't mean that perf commands work correctly with that Mercurial.
9 9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
10 10 # - make historical perf command work correctly with as wide Mercurial
11 11 # version as possible
12 12 #
13 13 # We have to do, if possible with reasonable cost:
14 14 # - make recent perf command for historical feature work correctly
15 15 # with early Mercurial
16 16 #
17 17 # We don't have to do:
18 18 # - make perf command for recent feature work correctly with early
19 19 # Mercurial
20 20
21 21 from __future__ import absolute_import
22 22 import functools
23 23 import gc
24 24 import os
25 25 import random
26 26 import struct
27 27 import sys
28 28 import threading
29 29 import time
30 30 from mercurial import (
31 31 changegroup,
32 32 cmdutil,
33 33 commands,
34 34 copies,
35 35 error,
36 36 extensions,
37 37 mdiff,
38 38 merge,
39 39 revlog,
40 40 util,
41 41 )
42 42
43 43 # for "historical portability":
44 44 # try to import modules separately (in dict order), and ignore
45 45 # failure, because these aren't available with early Mercurial
46 46 try:
47 47 from mercurial import branchmap # since 2.5 (or bcee63733aad)
48 48 except ImportError:
49 49 pass
50 50 try:
51 51 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
52 52 except ImportError:
53 53 pass
54 54 try:
55 55 from mercurial import registrar # since 3.7 (or 37d50250b696)
56 56 dir(registrar) # forcibly load it
57 57 except ImportError:
58 58 registrar = None
59 59 try:
60 60 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
61 61 except ImportError:
62 62 pass
63 63 try:
64 64 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
65 65 except ImportError:
66 66 pass
67 67 try:
68 68 from mercurial import pycompat
69 69 getargspec = pycompat.getargspec # added to module after 4.5
70 70 except (ImportError, AttributeError):
71 71 import inspect
72 72 getargspec = inspect.getargspec
73 73
74 74 try:
75 75 # 4.7+
76 76 queue = pycompat.queue.Queue
77 77 except (AttributeError, ImportError):
78 78 # <4.7.
79 79 try:
80 80 queue = pycompat.queue
81 81 except (AttributeError, ImportError):
82 82 queue = util.queue
83 83
84 84 try:
85 85 from mercurial import logcmdutil
86 86 makelogtemplater = logcmdutil.maketemplater
87 87 except (AttributeError, ImportError):
88 88 try:
89 89 makelogtemplater = cmdutil.makelogtemplater
90 90 except (AttributeError, ImportError):
91 91 makelogtemplater = None
92 92
93 93 # for "historical portability":
94 94 # define util.safehasattr forcibly, because util.safehasattr has been
95 95 # available since 1.9.3 (or 94b200a11cf7)
96 96 _undefined = object()
97 97 def safehasattr(thing, attr):
98 98 return getattr(thing, attr, _undefined) is not _undefined
99 99 setattr(util, 'safehasattr', safehasattr)
100 100
101 101 # for "historical portability":
102 102 # define util.timer forcibly, because util.timer has been available
103 103 # since ae5d60bb70c9
104 104 if safehasattr(time, 'perf_counter'):
105 105 util.timer = time.perf_counter
106 106 elif os.name == 'nt':
107 107 util.timer = time.clock
108 108 else:
109 109 util.timer = time.time
110 110
111 111 # for "historical portability":
112 112 # use locally defined empty option list, if formatteropts isn't
113 113 # available, because commands.formatteropts has been available since
114 114 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
115 115 # available since 2.2 (or ae5f92e154d3)
116 116 formatteropts = getattr(cmdutil, "formatteropts",
117 117 getattr(commands, "formatteropts", []))
118 118
119 119 # for "historical portability":
120 120 # use locally defined option list, if debugrevlogopts isn't available,
121 121 # because commands.debugrevlogopts has been available since 3.7 (or
122 122 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
123 123 # since 1.9 (or a79fea6b3e77).
124 124 revlogopts = getattr(cmdutil, "debugrevlogopts",
125 125 getattr(commands, "debugrevlogopts", [
126 126 ('c', 'changelog', False, ('open changelog')),
127 127 ('m', 'manifest', False, ('open manifest')),
128 128 ('', 'dir', False, ('open directory manifest')),
129 129 ]))
130 130
131 131 cmdtable = {}
132 132
133 133 # for "historical portability":
134 134 # define parsealiases locally, because cmdutil.parsealiases has been
135 135 # available since 1.5 (or 6252852b4332)
136 136 def parsealiases(cmd):
137 137 return cmd.lstrip("^").split("|")
138 138
139 139 if safehasattr(registrar, 'command'):
140 140 command = registrar.command(cmdtable)
141 141 elif safehasattr(cmdutil, 'command'):
142 142 command = cmdutil.command(cmdtable)
143 143 if 'norepo' not in getargspec(command).args:
144 144 # for "historical portability":
145 145 # wrap original cmdutil.command, because "norepo" option has
146 146 # been available since 3.1 (or 75a96326cecb)
147 147 _command = command
148 148 def command(name, options=(), synopsis=None, norepo=False):
149 149 if norepo:
150 150 commands.norepo += ' %s' % ' '.join(parsealiases(name))
151 151 return _command(name, list(options), synopsis)
152 152 else:
153 153 # for "historical portability":
154 154 # define "@command" annotation locally, because cmdutil.command
155 155 # has been available since 1.9 (or 2daa5179e73f)
156 156 def command(name, options=(), synopsis=None, norepo=False):
157 157 def decorator(func):
158 158 if synopsis:
159 159 cmdtable[name] = func, list(options), synopsis
160 160 else:
161 161 cmdtable[name] = func, list(options)
162 162 if norepo:
163 163 commands.norepo += ' %s' % ' '.join(parsealiases(name))
164 164 return func
165 165 return decorator
166 166
167 167 try:
168 168 import mercurial.registrar
169 169 import mercurial.configitems
170 170 configtable = {}
171 171 configitem = mercurial.registrar.configitem(configtable)
172 172 configitem('perf', 'presleep',
173 173 default=mercurial.configitems.dynamicdefault,
174 174 )
175 175 configitem('perf', 'stub',
176 176 default=mercurial.configitems.dynamicdefault,
177 177 )
178 178 configitem('perf', 'parentscount',
179 179 default=mercurial.configitems.dynamicdefault,
180 180 )
181 181 configitem('perf', 'all-timing',
182 182 default=mercurial.configitems.dynamicdefault,
183 183 )
184 184 except (ImportError, AttributeError):
185 185 pass
186 186
187 187 def getlen(ui):
188 188 if ui.configbool("perf", "stub", False):
189 189 return lambda x: 1
190 190 return len
191 191
192 192 def gettimer(ui, opts=None):
193 193 """return a timer function and formatter: (timer, formatter)
194 194
195 195 This function exists to create the formatter in a single place
196 196 instead of duplicating that logic in every performance command."""
197 197
198 198 # enforce an idle period before execution to counteract power management
199 199 # experimental config: perf.presleep
200 200 time.sleep(getint(ui, "perf", "presleep", 1))
201 201
202 202 if opts is None:
203 203 opts = {}
204 204 # redirect all to stderr unless buffer api is in use
205 205 if not ui._buffers:
206 206 ui = ui.copy()
207 207 uifout = safeattrsetter(ui, 'fout', ignoremissing=True)
208 208 if uifout:
209 209 # for "historical portability":
210 210 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
211 211 uifout.set(ui.ferr)
212 212
213 213 # get a formatter
214 214 uiformatter = getattr(ui, 'formatter', None)
215 215 if uiformatter:
216 216 fm = uiformatter('perf', opts)
217 217 else:
218 218 # for "historical portability":
219 219 # define formatter locally, because ui.formatter has been
220 220 # available since 2.2 (or ae5f92e154d3)
221 221 from mercurial import node
222 222 class defaultformatter(object):
223 223 """Minimized composition of baseformatter and plainformatter
224 224 """
225 225 def __init__(self, ui, topic, opts):
226 226 self._ui = ui
227 227 if ui.debugflag:
228 228 self.hexfunc = node.hex
229 229 else:
230 230 self.hexfunc = node.short
231 231 def __nonzero__(self):
232 232 return False
233 233 __bool__ = __nonzero__
234 234 def startitem(self):
235 235 pass
236 236 def data(self, **data):
237 237 pass
238 238 def write(self, fields, deftext, *fielddata, **opts):
239 239 self._ui.write(deftext % fielddata, **opts)
240 240 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
241 241 if cond:
242 242 self._ui.write(deftext % fielddata, **opts)
243 243 def plain(self, text, **opts):
244 244 self._ui.write(text, **opts)
245 245 def end(self):
246 246 pass
247 247 fm = defaultformatter(ui, 'perf', opts)
248 248
249 249 # stub function, runs code only once instead of in a loop
250 250 # experimental config: perf.stub
251 251 if ui.configbool("perf", "stub", False):
252 252 return functools.partial(stub_timer, fm), fm
253 253
254 254 # experimental config: perf.all-timing
255 255 displayall = ui.configbool("perf", "all-timing", False)
256 256 return functools.partial(_timer, fm, displayall=displayall), fm
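# A minimal sketch of how the perf commands below consume this helper
# (the pattern mirrors e.g. perfheads; it is illustrative, not a new command):
#
#   timer, fm = gettimer(ui, opts)
#   timer(lambda: len(repo.changelog.headrevs()), title='heads')
#   fm.end()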
257 257
258 258 def stub_timer(fm, func, title=None):
259 259 func()
260 260
261 261 def _timer(fm, func, title=None, displayall=False):
262 262 gc.collect()
263 263 results = []
264 264 begin = util.timer()
265 265 count = 0
266 266 while True:
267 267 ostart = os.times()
268 268 cstart = util.timer()
269 269 r = func()
270 270 cstop = util.timer()
271 271 ostop = os.times()
272 272 count += 1
273 273 a, b = ostart, ostop
274 274 results.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
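# stop sampling once enough data has been gathered: after more than 3
# seconds with at least 100 runs, or after more than 10 seconds with at
# least 3 runs, whichever comes first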
275 275 if cstop - begin > 3 and count >= 100:
276 276 break
277 277 if cstop - begin > 10 and count >= 3:
278 278 break
279 279
280 280 fm.startitem()
281 281
282 282 if title:
283 283 fm.write('title', '! %s\n', title)
284 284 if r:
285 285 fm.write('result', '! result: %s\n', r)
286 286 def display(role, entry):
287 287 prefix = ''
288 288 if role != 'best':
289 289 prefix = '%s.' % role
290 290 fm.plain('!')
291 291 fm.write(prefix + 'wall', ' wall %f', entry[0])
292 292 fm.write(prefix + 'comb', ' comb %f', entry[1] + entry[2])
293 293 fm.write(prefix + 'user', ' user %f', entry[1])
294 294 fm.write(prefix + 'sys', ' sys %f', entry[2])
295 295 fm.write(prefix + 'count', ' (%s of %d)', role, count)
296 296 fm.plain('\n')
297 297 results.sort()
298 298 min_val = results[0]
299 299 display('best', min_val)
300 300 if displayall:
301 301 max_val = results[-1]
302 302 display('max', max_val)
303 303 avg = tuple([sum(x) / count for x in zip(*results)])
304 304 display('avg', avg)
305 305 median = results[len(results) // 2]
306 306 display('median', median)
307 307
308 308 # utilities for historical portability
309 309
310 310 def getint(ui, section, name, default):
311 311 # for "historical portability":
312 312 # ui.configint has been available since 1.9 (or fa2b596db182)
313 313 v = ui.config(section, name, None)
314 314 if v is None:
315 315 return default
316 316 try:
317 317 return int(v)
318 318 except ValueError:
319 319 raise error.ConfigError(("%s.%s is not an integer ('%s')")
320 320 % (section, name, v))
321 321
322 322 def safeattrsetter(obj, name, ignoremissing=False):
323 323 """Ensure that 'obj' has 'name' attribute before subsequent setattr
324 324
325 325 This function is aborted, if 'obj' doesn't have 'name' attribute
326 326 at runtime. This avoids overlooking removal of an attribute, which
327 327 breaks assumption of performance measurement, in the future.
328 328
329 329 This function returns the object to (1) assign a new value, and
330 330 (2) restore an original value to the attribute.
331 331
332 332 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
333 333 abortion, and this function returns None. This is useful to
334 334 examine an attribute, which isn't ensured in all Mercurial
335 335 versions.
336 336 """
337 337 if not util.safehasattr(obj, name):
338 338 if ignoremissing:
339 339 return None
340 340 raise error.Abort(("missing attribute %s of %s might break assumption"
341 341 " of performance measurement") % (name, obj))
342 342
343 343 origvalue = getattr(obj, name)
344 344 class attrutil(object):
345 345 def set(self, newvalue):
346 346 setattr(obj, name, newvalue)
347 347 def restore(self):
348 348 setattr(obj, name, origvalue)
349 349
350 350 return attrutil()
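# Illustrative use of safeattrsetter, mirroring how perfbranchmap below
# silences branchmap cache I/O during measurement:
#
#   branchcacheread = safeattrsetter(branchmap, 'read')
#   branchcacheread.set(lambda repo: None)
#   try:
#       ...  # run the timed operation
#   finally:
#       branchcacheread.restore()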
351 351
352 352 # utilities to examine internal API changes
353 353
354 354 def getbranchmapsubsettable():
355 355 # for "historical portability":
356 356 # subsettable is defined in:
357 357 # - branchmap since 2.9 (or 175c6fd8cacc)
358 358 # - repoview since 2.5 (or 59a9f18d4587)
359 359 for mod in (branchmap, repoview):
360 360 subsettable = getattr(mod, 'subsettable', None)
361 361 if subsettable:
362 362 return subsettable
363 363
364 364 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
365 365 # branchmap and repoview modules exist, but subsettable attribute
366 366 # doesn't)
367 367 raise error.Abort(("perfbranchmap not available with this Mercurial"),
368 368 hint="use 2.5 or later")
369 369
370 370 def getsvfs(repo):
371 371 """Return appropriate object to access files under .hg/store
372 372 """
373 373 # for "historical portability":
374 374 # repo.svfs has been available since 2.3 (or 7034365089bf)
375 375 svfs = getattr(repo, 'svfs', None)
376 376 if svfs:
377 377 return svfs
378 378 else:
379 379 return getattr(repo, 'sopener')
380 380
381 381 def getvfs(repo):
382 382 """Return appropriate object to access files under .hg
383 383 """
384 384 # for "historical portability":
385 385 # repo.vfs has been available since 2.3 (or 7034365089bf)
386 386 vfs = getattr(repo, 'vfs', None)
387 387 if vfs:
388 388 return vfs
389 389 else:
390 390 return getattr(repo, 'opener')
391 391
392 392 def repocleartagscachefunc(repo):
393 393 """Return the function to clear tags cache according to repo internal API
394 394 """
395 395 if util.safehasattr(repo, '_tagscache'): # since 2.0 (or 9dca7653b525)
396 396 # in this case, setattr(repo, '_tagscache', None) or so isn't the
397 397 # correct way to clear the tags cache, because existing code paths
398 398 # expect _tagscache to be a structured object.
399 399 def clearcache():
400 400 # _tagscache has been filteredpropertycache since 2.5 (or
401 401 # 98c867ac1330), and delattr() can't work in such case
402 402 if '_tagscache' in vars(repo):
403 403 del repo.__dict__['_tagscache']
404 404 return clearcache
405 405
406 406 repotags = safeattrsetter(repo, '_tags', ignoremissing=True)
407 407 if repotags: # since 1.4 (or 5614a628d173)
408 408 return lambda : repotags.set(None)
409 409
410 410 repotagscache = safeattrsetter(repo, 'tagscache', ignoremissing=True)
411 411 if repotagscache: # since 0.6 (or d7df759d0e97)
412 412 return lambda : repotagscache.set(None)
413 413
414 414 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
415 415 # this point, but it isn't so problematic, because:
416 416 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
417 417 # in perftags() causes failure soon
418 418 # - perf.py itself has been available since 1.1 (or eb240755386d)
419 419 raise error.Abort(("tags API of this hg command is unknown"))
420 420
421 421 # utilities to clear cache
422 422
423 423 def clearfilecache(repo, attrname):
424 424 unfi = repo.unfiltered()
425 425 if attrname in vars(unfi):
426 426 delattr(unfi, attrname)
427 427 unfi._filecache.pop(attrname, None)
428 428
429 429 # perf commands
430 430
431 431 @command('perfwalk', formatteropts)
432 432 def perfwalk(ui, repo, *pats, **opts):
433 433 timer, fm = gettimer(ui, opts)
434 434 m = scmutil.match(repo[None], pats, {})
435 435 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
436 436 ignored=False))))
437 437 fm.end()
438 438
439 439 @command('perfannotate', formatteropts)
440 440 def perfannotate(ui, repo, f, **opts):
441 441 timer, fm = gettimer(ui, opts)
442 442 fc = repo['.'][f]
443 443 timer(lambda: len(fc.annotate(True)))
444 444 fm.end()
445 445
446 446 @command('perfstatus',
447 447 [('u', 'unknown', False,
448 448 'ask status to look for unknown files')] + formatteropts)
449 449 def perfstatus(ui, repo, **opts):
450 450 #m = match.always(repo.root, repo.getcwd())
451 451 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
452 452 # False))))
453 453 timer, fm = gettimer(ui, opts)
454 454 timer(lambda: sum(map(len, repo.status(unknown=opts['unknown']))))
455 455 fm.end()
456 456
457 457 @command('perfaddremove', formatteropts)
458 458 def perfaddremove(ui, repo, **opts):
459 459 timer, fm = gettimer(ui, opts)
460 460 try:
461 461 oldquiet = repo.ui.quiet
462 462 repo.ui.quiet = True
463 463 matcher = scmutil.match(repo[None])
464 464 opts['dry_run'] = True
465 465 timer(lambda: scmutil.addremove(repo, matcher, "", opts))
466 466 finally:
467 467 repo.ui.quiet = oldquiet
468 468 fm.end()
469 469
470 470 def clearcaches(cl):
471 471 # behave somewhat consistently across internal API changes
472 472 if util.safehasattr(cl, 'clearcaches'):
473 473 cl.clearcaches()
474 474 elif util.safehasattr(cl, '_nodecache'):
475 475 from mercurial.node import nullid, nullrev
476 476 cl._nodecache = {nullid: nullrev}
477 477 cl._nodepos = None
478 478
479 479 @command('perfheads', formatteropts)
480 480 def perfheads(ui, repo, **opts):
481 481 timer, fm = gettimer(ui, opts)
482 482 cl = repo.changelog
483 483 def d():
484 484 len(cl.headrevs())
485 485 clearcaches(cl)
486 486 timer(d)
487 487 fm.end()
488 488
489 489 @command('perftags', formatteropts)
490 490 def perftags(ui, repo, **opts):
491 491 import mercurial.changelog
492 492 import mercurial.manifest
493 493 timer, fm = gettimer(ui, opts)
494 494 svfs = getsvfs(repo)
495 495 repocleartagscache = repocleartagscachefunc(repo)
496 496 def t():
497 497 repo.changelog = mercurial.changelog.changelog(svfs)
498 498 repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo)
499 499 repocleartagscache()
500 500 return len(repo.tags())
501 501 timer(t)
502 502 fm.end()
503 503
504 504 @command('perfancestors', formatteropts)
505 505 def perfancestors(ui, repo, **opts):
506 506 timer, fm = gettimer(ui, opts)
507 507 heads = repo.changelog.headrevs()
508 508 def d():
509 509 for a in repo.changelog.ancestors(heads):
510 510 pass
511 511 timer(d)
512 512 fm.end()
513 513
514 514 @command('perfancestorset', formatteropts)
515 515 def perfancestorset(ui, repo, revset, **opts):
516 516 timer, fm = gettimer(ui, opts)
517 517 revs = repo.revs(revset)
518 518 heads = repo.changelog.headrevs()
519 519 def d():
520 520 s = repo.changelog.ancestors(heads)
521 521 for rev in revs:
522 522 rev in s
523 523 timer(d)
524 524 fm.end()
525 525
526 526 @command('perfbookmarks', formatteropts)
527 527 def perfbookmarks(ui, repo, **opts):
528 528 """benchmark parsing bookmarks from disk to memory"""
529 529 timer, fm = gettimer(ui, opts)
530 530 def d():
531 531 clearfilecache(repo, '_bookmarks')
532 532 repo._bookmarks
533 533 timer(d)
534 534 fm.end()
535 535
536 536 @command('perfbundleread', formatteropts, 'BUNDLE')
537 537 def perfbundleread(ui, repo, bundlepath, **opts):
538 538 """Benchmark reading of bundle files.
539 539
540 540 This command is meant to isolate the I/O part of bundle reading as
541 541 much as possible.
542 542 """
543 543 from mercurial import (
544 544 bundle2,
545 545 exchange,
546 546 streamclone,
547 547 )
548 548
549 549 def makebench(fn):
550 550 def run():
551 551 with open(bundlepath, 'rb') as fh:
552 552 bundle = exchange.readbundle(ui, fh, bundlepath)
553 553 fn(bundle)
554 554
555 555 return run
556 556
557 557 def makereadnbytes(size):
558 558 def run():
559 559 with open(bundlepath, 'rb') as fh:
560 560 bundle = exchange.readbundle(ui, fh, bundlepath)
561 561 while bundle.read(size):
562 562 pass
563 563
564 564 return run
565 565
566 566 def makestdioread(size):
567 567 def run():
568 568 with open(bundlepath, 'rb') as fh:
569 569 while fh.read(size):
570 570 pass
571 571
572 572 return run
573 573
574 574 # bundle1
575 575
576 576 def deltaiter(bundle):
577 577 for delta in bundle.deltaiter():
578 578 pass
579 579
580 580 def iterchunks(bundle):
581 581 for chunk in bundle.getchunks():
582 582 pass
583 583
584 584 # bundle2
585 585
586 586 def forwardchunks(bundle):
587 587 for chunk in bundle._forwardchunks():
588 588 pass
589 589
590 590 def iterparts(bundle):
591 591 for part in bundle.iterparts():
592 592 pass
593 593
594 594 def iterpartsseekable(bundle):
595 595 for part in bundle.iterparts(seekable=True):
596 596 pass
597 597
598 598 def seek(bundle):
599 599 for part in bundle.iterparts(seekable=True):
600 600 part.seek(0, os.SEEK_END)
601 601
602 602 def makepartreadnbytes(size):
603 603 def run():
604 604 with open(bundlepath, 'rb') as fh:
605 605 bundle = exchange.readbundle(ui, fh, bundlepath)
606 606 for part in bundle.iterparts():
607 607 while part.read(size):
608 608 pass
609 609
610 610 return run
611 611
612 612 benches = [
613 613 (makestdioread(8192), 'read(8k)'),
614 614 (makestdioread(16384), 'read(16k)'),
615 615 (makestdioread(32768), 'read(32k)'),
616 616 (makestdioread(131072), 'read(128k)'),
617 617 ]
618 618
619 619 with open(bundlepath, 'rb') as fh:
620 620 bundle = exchange.readbundle(ui, fh, bundlepath)
621 621
622 622 if isinstance(bundle, changegroup.cg1unpacker):
623 623 benches.extend([
624 624 (makebench(deltaiter), 'cg1 deltaiter()'),
625 625 (makebench(iterchunks), 'cg1 getchunks()'),
626 626 (makereadnbytes(8192), 'cg1 read(8k)'),
627 627 (makereadnbytes(16384), 'cg1 read(16k)'),
628 628 (makereadnbytes(32768), 'cg1 read(32k)'),
629 629 (makereadnbytes(131072), 'cg1 read(128k)'),
630 630 ])
631 631 elif isinstance(bundle, bundle2.unbundle20):
632 632 benches.extend([
633 633 (makebench(forwardchunks), 'bundle2 forwardchunks()'),
634 634 (makebench(iterparts), 'bundle2 iterparts()'),
635 635 (makebench(iterpartsseekable), 'bundle2 iterparts() seekable'),
636 636 (makebench(seek), 'bundle2 part seek()'),
637 637 (makepartreadnbytes(8192), 'bundle2 part read(8k)'),
638 638 (makepartreadnbytes(16384), 'bundle2 part read(16k)'),
639 639 (makepartreadnbytes(32768), 'bundle2 part read(32k)'),
640 640 (makepartreadnbytes(131072), 'bundle2 part read(128k)'),
641 641 ])
642 642 elif isinstance(bundle, streamclone.streamcloneapplier):
643 643 raise error.Abort('stream clone bundles not supported')
644 644 else:
645 645 raise error.Abort('unhandled bundle type: %s' % type(bundle))
646 646
647 647 for fn, title in benches:
648 648 timer, fm = gettimer(ui, opts)
649 649 timer(fn, title=title)
650 650 fm.end()
651 651
652 652 @command('perfchangegroupchangelog', formatteropts +
653 653 [('', 'version', '02', 'changegroup version'),
654 654 ('r', 'rev', '', 'revisions to add to changegroup')])
655 655 def perfchangegroupchangelog(ui, repo, version='02', rev=None, **opts):
656 656 """Benchmark producing a changelog group for a changegroup.
657 657
658 658 This measures the time spent processing the changelog during a
659 659 bundle operation. This occurs during `hg bundle` and on a server
660 660 processing a `getbundle` wire protocol request (as happens during
661 661 clones and pulls).
662 662
663 663 By default, all revisions are added to the changegroup.
664 664 """
665 665 cl = repo.changelog
666 666 revs = [cl.lookup(r) for r in repo.revs(rev or 'all()')]
667 667 bundler = changegroup.getbundler(version, repo)
668 668
669 669 def lookup(node):
670 670 # The real bundler reads the revision in order to access the
671 671 # manifest node and files list. Do that here.
672 672 cl.read(node)
673 673 return node
674 674
675 675 def d():
676 676 for chunk in bundler.group(revs, cl, lookup):
677 677 pass
678 678
679 679 timer, fm = gettimer(ui, opts)
680 680 timer(d)
681 681 fm.end()
682 682
683 683 @command('perfdirs', formatteropts)
684 684 def perfdirs(ui, repo, **opts):
685 685 timer, fm = gettimer(ui, opts)
686 686 dirstate = repo.dirstate
687 687 'a' in dirstate
688 688 def d():
689 689 dirstate.hasdir('a')
690 690 del dirstate._map._dirs
691 691 timer(d)
692 692 fm.end()
693 693
694 694 @command('perfdirstate', formatteropts)
695 695 def perfdirstate(ui, repo, **opts):
696 696 timer, fm = gettimer(ui, opts)
697 697 "a" in repo.dirstate
698 698 def d():
699 699 repo.dirstate.invalidate()
700 700 "a" in repo.dirstate
701 701 timer(d)
702 702 fm.end()
703 703
704 704 @command('perfdirstatedirs', formatteropts)
705 705 def perfdirstatedirs(ui, repo, **opts):
706 706 timer, fm = gettimer(ui, opts)
707 707 "a" in repo.dirstate
708 708 def d():
709 709 repo.dirstate.hasdir("a")
710 710 del repo.dirstate._map._dirs
711 711 timer(d)
712 712 fm.end()
713 713
714 714 @command('perfdirstatefoldmap', formatteropts)
715 715 def perfdirstatefoldmap(ui, repo, **opts):
716 716 timer, fm = gettimer(ui, opts)
717 717 dirstate = repo.dirstate
718 718 'a' in dirstate
719 719 def d():
720 720 dirstate._map.filefoldmap.get('a')
721 721 del dirstate._map.filefoldmap
722 722 timer(d)
723 723 fm.end()
724 724
725 725 @command('perfdirfoldmap', formatteropts)
726 726 def perfdirfoldmap(ui, repo, **opts):
727 727 timer, fm = gettimer(ui, opts)
728 728 dirstate = repo.dirstate
729 729 'a' in dirstate
730 730 def d():
731 731 dirstate._map.dirfoldmap.get('a')
732 732 del dirstate._map.dirfoldmap
733 733 del dirstate._map._dirs
734 734 timer(d)
735 735 fm.end()
736 736
737 737 @command('perfdirstatewrite', formatteropts)
738 738 def perfdirstatewrite(ui, repo, **opts):
739 739 timer, fm = gettimer(ui, opts)
740 740 ds = repo.dirstate
741 741 "a" in ds
742 742 def d():
743 743 ds._dirty = True
744 744 ds.write(repo.currenttransaction())
745 745 timer(d)
746 746 fm.end()
747 747
748 748 @command('perfmergecalculate',
749 749 [('r', 'rev', '.', 'rev to merge against')] + formatteropts)
750 750 def perfmergecalculate(ui, repo, rev, **opts):
751 751 timer, fm = gettimer(ui, opts)
752 752 wctx = repo[None]
753 753 rctx = scmutil.revsingle(repo, rev, rev)
754 754 ancestor = wctx.ancestor(rctx)
755 755 # we don't want working dir files to be stat'd in the benchmark, so prime
756 756 # that cache
757 757 wctx.dirty()
758 758 def d():
759 759 # acceptremote is True because we don't want prompts in the middle of
760 760 # our benchmark
761 761 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
762 762 acceptremote=True, followcopies=True)
763 763 timer(d)
764 764 fm.end()
765 765
766 766 @command('perfpathcopies', [], "REV REV")
767 767 def perfpathcopies(ui, repo, rev1, rev2, **opts):
768 768 timer, fm = gettimer(ui, opts)
769 769 ctx1 = scmutil.revsingle(repo, rev1, rev1)
770 770 ctx2 = scmutil.revsingle(repo, rev2, rev2)
771 771 def d():
772 772 copies.pathcopies(ctx1, ctx2)
773 773 timer(d)
774 774 fm.end()
775 775
776 776 @command('perfphases',
777 777 [('', 'full', False, 'include file reading time too'),
778 778 ], "")
779 779 def perfphases(ui, repo, **opts):
780 780 """benchmark phasesets computation"""
781 781 timer, fm = gettimer(ui, opts)
782 782 _phases = repo._phasecache
783 783 full = opts.get('full')
784 784 def d():
785 785 phases = _phases
786 786 if full:
787 787 clearfilecache(repo, '_phasecache')
788 788 phases = repo._phasecache
789 789 phases.invalidate()
790 790 phases.loadphaserevs(repo)
791 791 timer(d)
792 792 fm.end()
793 793
794 @command('perfmanifest', [], 'REV')
795 def perfmanifest(ui, repo, rev, **opts):
794 @command('perfmanifest', [
795 ('m', 'manifest-rev', False, 'Look up a manifest node revision'),
796 ('', 'clear-disk', False, 'clear on-disk caches too'),
797 ], 'REV|NODE')
798 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
796 799 """benchmark the time to read a manifest from disk and return a usable
797 800 dict-like object
798 801
799 802 Manifest caches are cleared before retrieval."""
800 803 timer, fm = gettimer(ui, opts)
804 if not manifest_rev:
801 805 ctx = scmutil.revsingle(repo, rev, rev)
802 806 t = ctx.manifestnode()
807 else:
808 t = repo.manifestlog._revlog.lookup(rev)
803 809 def d():
804 810 repo.manifestlog.clearcaches()
805 811 repo.manifestlog[t].read()
806 812 timer(d)
807 813 fm.end()
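# Hypothetical invocations of the command above: REV can name a changeset
# as before, or, with the new flag, a revision/node of the manifest revlog:
#
#   hg perfmanifest tip
#   hg perfmanifest --manifest-rev 0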
808 814
809 815 @command('perfchangeset', formatteropts)
810 816 def perfchangeset(ui, repo, rev, **opts):
811 817 timer, fm = gettimer(ui, opts)
812 818 n = scmutil.revsingle(repo, rev).node()
813 819 def d():
814 820 repo.changelog.read(n)
815 821 #repo.changelog._cache = None
816 822 timer(d)
817 823 fm.end()
818 824
819 825 @command('perfindex', formatteropts)
820 826 def perfindex(ui, repo, **opts):
821 827 import mercurial.revlog
822 828 timer, fm = gettimer(ui, opts)
823 829 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
824 830 n = repo["tip"].node()
825 831 svfs = getsvfs(repo)
826 832 def d():
827 833 cl = mercurial.revlog.revlog(svfs, "00changelog.i")
828 834 cl.rev(n)
829 835 timer(d)
830 836 fm.end()
831 837
832 838 @command('perfstartup', formatteropts)
833 839 def perfstartup(ui, repo, **opts):
834 840 timer, fm = gettimer(ui, opts)
835 841 cmd = sys.argv[0]
836 842 def d():
837 843 if os.name != 'nt':
838 844 os.system("HGRCPATH= %s version -q > /dev/null" % cmd)
839 845 else:
840 846 os.environ['HGRCPATH'] = ' '
841 847 os.system("%s version -q > NUL" % cmd)
842 848 timer(d)
843 849 fm.end()
844 850
845 851 @command('perfparents', formatteropts)
846 852 def perfparents(ui, repo, **opts):
847 853 timer, fm = gettimer(ui, opts)
848 854 # control the number of commits perfparents iterates over
849 855 # experimental config: perf.parentscount
850 856 count = getint(ui, "perf", "parentscount", 1000)
851 857 if len(repo.changelog) < count:
852 858 raise error.Abort("repo needs %d commits for this test" % count)
853 859 repo = repo.unfiltered()
854 860 nl = [repo.changelog.node(i) for i in xrange(count)]
855 861 def d():
856 862 for n in nl:
857 863 repo.changelog.parents(n)
858 864 timer(d)
859 865 fm.end()
860 866
861 867 @command('perfctxfiles', formatteropts)
862 868 def perfctxfiles(ui, repo, x, **opts):
863 869 x = int(x)
864 870 timer, fm = gettimer(ui, opts)
865 871 def d():
866 872 len(repo[x].files())
867 873 timer(d)
868 874 fm.end()
869 875
870 876 @command('perfrawfiles', formatteropts)
871 877 def perfrawfiles(ui, repo, x, **opts):
872 878 x = int(x)
873 879 timer, fm = gettimer(ui, opts)
874 880 cl = repo.changelog
875 881 def d():
876 882 len(cl.read(x)[3])
877 883 timer(d)
878 884 fm.end()
879 885
880 886 @command('perflookup', formatteropts)
881 887 def perflookup(ui, repo, rev, **opts):
882 888 timer, fm = gettimer(ui, opts)
883 889 timer(lambda: len(repo.lookup(rev)))
884 890 fm.end()
885 891
886 892 @command('perfrevrange', formatteropts)
887 893 def perfrevrange(ui, repo, *specs, **opts):
888 894 timer, fm = gettimer(ui, opts)
889 895 revrange = scmutil.revrange
890 896 timer(lambda: len(revrange(repo, specs)))
891 897 fm.end()
892 898
893 899 @command('perfnodelookup', formatteropts)
894 900 def perfnodelookup(ui, repo, rev, **opts):
895 901 timer, fm = gettimer(ui, opts)
896 902 import mercurial.revlog
897 903 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
898 904 n = scmutil.revsingle(repo, rev).node()
899 905 cl = mercurial.revlog.revlog(getsvfs(repo), "00changelog.i")
900 906 def d():
901 907 cl.rev(n)
902 908 clearcaches(cl)
903 909 timer(d)
904 910 fm.end()
905 911
906 912 @command('perflog',
907 913 [('', 'rename', False, 'ask log to follow renames')] + formatteropts)
908 914 def perflog(ui, repo, rev=None, **opts):
909 915 if rev is None:
910 916 rev=[]
911 917 timer, fm = gettimer(ui, opts)
912 918 ui.pushbuffer()
913 919 timer(lambda: commands.log(ui, repo, rev=rev, date='', user='',
914 920 copies=opts.get('rename')))
915 921 ui.popbuffer()
916 922 fm.end()
917 923
918 924 @command('perfmoonwalk', formatteropts)
919 925 def perfmoonwalk(ui, repo, **opts):
920 926 """benchmark walking the changelog backwards
921 927
922 928 This also loads the changelog data for each revision in the changelog.
923 929 """
924 930 timer, fm = gettimer(ui, opts)
925 931 def moonwalk():
926 932 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
927 933 ctx = repo[i]
928 934 ctx.branch() # read changelog data (in addition to the index)
929 935 timer(moonwalk)
930 936 fm.end()
931 937
932 938 @command('perftemplating',
933 939 [('r', 'rev', [], 'revisions to run the template on'),
934 940 ] + formatteropts)
935 941 def perftemplating(ui, repo, testedtemplate=None, **opts):
936 942 """test the rendering time of a given template"""
937 943 if makelogtemplater is None:
938 944 raise error.Abort(("perftemplating not available with this Mercurial"),
939 945 hint="use 4.3 or later")
940 946
941 947 nullui = ui.copy()
942 948 nullui.fout = open(os.devnull, 'wb')
943 949 nullui.disablepager()
944 950 revs = opts.get('rev')
945 951 if not revs:
946 952 revs = ['all()']
947 953 revs = list(scmutil.revrange(repo, revs))
948 954
949 955 defaulttemplate = ('{date|shortdate} [{rev}:{node|short}]'
950 956 ' {author|person}: {desc|firstline}\n')
951 957 if testedtemplate is None:
952 958 testedtemplate = defaulttemplate
953 959 displayer = makelogtemplater(nullui, repo, testedtemplate)
954 960 def format():
955 961 for r in revs:
956 962 ctx = repo[r]
957 963 displayer.show(ctx)
958 964 displayer.flush(ctx)
959 965
960 966 timer, fm = gettimer(ui, opts)
961 967 timer(format)
962 968 fm.end()
963 969
964 970 @command('perfcca', formatteropts)
965 971 def perfcca(ui, repo, **opts):
966 972 timer, fm = gettimer(ui, opts)
967 973 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
968 974 fm.end()
969 975
970 976 @command('perffncacheload', formatteropts)
971 977 def perffncacheload(ui, repo, **opts):
972 978 timer, fm = gettimer(ui, opts)
973 979 s = repo.store
974 980 def d():
975 981 s.fncache._load()
976 982 timer(d)
977 983 fm.end()
978 984
979 985 @command('perffncachewrite', formatteropts)
980 986 def perffncachewrite(ui, repo, **opts):
981 987 timer, fm = gettimer(ui, opts)
982 988 s = repo.store
983 989 lock = repo.lock()
984 990 s.fncache._load()
985 991 tr = repo.transaction('perffncachewrite')
986 992 tr.addbackup('fncache')
987 993 def d():
988 994 s.fncache._dirty = True
989 995 s.fncache.write(tr)
990 996 timer(d)
991 997 tr.close()
992 998 lock.release()
993 999 fm.end()
994 1000
995 1001 @command('perffncacheencode', formatteropts)
996 1002 def perffncacheencode(ui, repo, **opts):
997 1003 timer, fm = gettimer(ui, opts)
998 1004 s = repo.store
999 1005 s.fncache._load()
1000 1006 def d():
1001 1007 for p in s.fncache.entries:
1002 1008 s.encode(p)
1003 1009 timer(d)
1004 1010 fm.end()
1005 1011
1006 1012 def _bdiffworker(q, blocks, xdiff, ready, done):
1007 1013 while not done.is_set():
1008 1014 pair = q.get()
1009 1015 while pair is not None:
1010 1016 if xdiff:
1011 1017 mdiff.bdiff.xdiffblocks(*pair)
1012 1018 elif blocks:
1013 1019 mdiff.bdiff.blocks(*pair)
1014 1020 else:
1015 1021 mdiff.textdiff(*pair)
1016 1022 q.task_done()
1017 1023 pair = q.get()
1018 1024 q.task_done() # for the None one
1019 1025 with ready:
1020 1026 ready.wait()
1021 1027
1022 1028 @command('perfbdiff', revlogopts + formatteropts + [
1023 1029 ('', 'count', 1, 'number of revisions to test (when using --startrev)'),
1024 1030 ('', 'alldata', False, 'test bdiffs for all associated revisions'),
1025 1031 ('', 'threads', 0, 'number of threads to use (disable with 0)'),
1026 1032 ('', 'blocks', False, 'test computing diffs into blocks'),
1027 1033 ('', 'xdiff', False, 'use xdiff algorithm'),
1028 1034 ],
1029 1035
1030 1036 '-c|-m|FILE REV')
1031 1037 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1032 1038 """benchmark a bdiff between revisions
1033 1039
1034 1040 By default, benchmark a bdiff between the given revision and its delta parent.
1035 1041
1036 1042 With ``--count``, benchmark bdiffs between delta parents and self for N
1037 1043 revisions starting at the specified revision.
1038 1044
1039 1045 With ``--alldata``, assume the requested revision is a changeset and
1040 1046 measure bdiffs for all changes related to that changeset (manifest
1041 1047 and filelogs).
1042 1048 """
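# Illustrative invocations (revision numbers are hypothetical):
#   hg perfbdiff -c 100                  # changelog rev 100 vs its delta parent
#   hg perfbdiff --alldata 100           # all manifest/filelog pairs for the cset
#   hg perfbdiff --blocks --xdiff -m 100 # xdiff block diffs on the manifest log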
1043 1049 opts = pycompat.byteskwargs(opts)
1044 1050
1045 1051 if opts['xdiff'] and not opts['blocks']:
1046 1052 raise error.CommandError('perfbdiff', '--xdiff requires --blocks')
1047 1053
1048 1054 if opts['alldata']:
1049 1055 opts['changelog'] = True
1050 1056
1051 1057 if opts.get('changelog') or opts.get('manifest'):
1052 1058 file_, rev = None, file_
1053 1059 elif rev is None:
1054 1060 raise error.CommandError('perfbdiff', 'invalid arguments')
1055 1061
1056 1062 blocks = opts['blocks']
1057 1063 xdiff = opts['xdiff']
1058 1064 textpairs = []
1059 1065
1060 1066 r = cmdutil.openrevlog(repo, 'perfbdiff', file_, opts)
1061 1067
1062 1068 startrev = r.rev(r.lookup(rev))
1063 1069 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1064 1070 if opts['alldata']:
1065 1071 # Load revisions associated with changeset.
1066 1072 ctx = repo[rev]
1067 1073 mtext = repo.manifestlog._revlog.revision(ctx.manifestnode())
1068 1074 for pctx in ctx.parents():
1069 1075 pman = repo.manifestlog._revlog.revision(pctx.manifestnode())
1070 1076 textpairs.append((pman, mtext))
1071 1077
1072 1078 # Load filelog revisions by iterating manifest delta.
1073 1079 man = ctx.manifest()
1074 1080 pman = ctx.p1().manifest()
1075 1081 for filename, change in pman.diff(man).items():
1076 1082 fctx = repo.file(filename)
1077 1083 f1 = fctx.revision(change[0][0] or -1)
1078 1084 f2 = fctx.revision(change[1][0] or -1)
1079 1085 textpairs.append((f1, f2))
1080 1086 else:
1081 1087 dp = r.deltaparent(rev)
1082 1088 textpairs.append((r.revision(dp), r.revision(rev)))
1083 1089
1084 1090 withthreads = threads > 0
1085 1091 if not withthreads:
1086 1092 def d():
1087 1093 for pair in textpairs:
1088 1094 if xdiff:
1089 1095 mdiff.bdiff.xdiffblocks(*pair)
1090 1096 elif blocks:
1091 1097 mdiff.bdiff.blocks(*pair)
1092 1098 else:
1093 1099 mdiff.textdiff(*pair)
1094 1100 else:
1095 1101 q = queue()
1096 1102 for i in xrange(threads):
1097 1103 q.put(None)
1098 1104 ready = threading.Condition()
1099 1105 done = threading.Event()
1100 1106 for i in xrange(threads):
1101 1107 threading.Thread(target=_bdiffworker,
1102 1108 args=(q, blocks, xdiff, ready, done)).start()
1103 1109 q.join()
1104 1110 def d():
1105 1111 for pair in textpairs:
1106 1112 q.put(pair)
1107 1113 for i in xrange(threads):
1108 1114 q.put(None)
1109 1115 with ready:
1110 1116 ready.notify_all()
1111 1117 q.join()
1112 1118 timer, fm = gettimer(ui, opts)
1113 1119 timer(d)
1114 1120 fm.end()
1115 1121
1116 1122 if withthreads:
1117 1123 done.set()
1118 1124 for i in xrange(threads):
1119 1125 q.put(None)
1120 1126 with ready:
1121 1127 ready.notify_all()
1122 1128
1123 1129 @command('perfunidiff', revlogopts + formatteropts + [
1124 1130 ('', 'count', 1, 'number of revisions to test (when using --startrev)'),
1125 1131 ('', 'alldata', False, 'test unidiffs for all associated revisions'),
1126 1132 ], '-c|-m|FILE REV')
1127 1133 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1128 1134 """benchmark a unified diff between revisions
1129 1135
1130 1136 This doesn't include any copy tracing - it's just a unified diff
1131 1137 of the texts.
1132 1138
1133 1139 By default, benchmark a diff between the given revision and its delta parent.
1134 1140
1135 1141 With ``--count``, benchmark diffs between delta parents and self for N
1136 1142 revisions starting at the specified revision.
1137 1143
1138 1144 With ``--alldata``, assume the requested revision is a changeset and
1139 1145 measure diffs for all changes related to that changeset (manifest
1140 1146 and filelogs).
1141 1147 """
1142 1148 if opts['alldata']:
1143 1149 opts['changelog'] = True
1144 1150
1145 1151 if opts.get('changelog') or opts.get('manifest'):
1146 1152 file_, rev = None, file_
1147 1153 elif rev is None:
1148 1154 raise error.CommandError('perfunidiff', 'invalid arguments')
1149 1155
1150 1156 textpairs = []
1151 1157
1152 1158 r = cmdutil.openrevlog(repo, 'perfunidiff', file_, opts)
1153 1159
1154 1160 startrev = r.rev(r.lookup(rev))
1155 1161 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1156 1162 if opts['alldata']:
1157 1163 # Load revisions associated with changeset.
1158 1164 ctx = repo[rev]
1159 1165 mtext = repo.manifestlog._revlog.revision(ctx.manifestnode())
1160 1166 for pctx in ctx.parents():
1161 1167 pman = repo.manifestlog._revlog.revision(pctx.manifestnode())
1162 1168 textpairs.append((pman, mtext))
1163 1169
1164 1170 # Load filelog revisions by iterating manifest delta.
1165 1171 man = ctx.manifest()
1166 1172 pman = ctx.p1().manifest()
1167 1173 for filename, change in pman.diff(man).items():
1168 1174 fctx = repo.file(filename)
1169 1175 f1 = fctx.revision(change[0][0] or -1)
1170 1176 f2 = fctx.revision(change[1][0] or -1)
1171 1177 textpairs.append((f1, f2))
1172 1178 else:
1173 1179 dp = r.deltaparent(rev)
1174 1180 textpairs.append((r.revision(dp), r.revision(rev)))
1175 1181
1176 1182 def d():
1177 1183 for left, right in textpairs:
1178 1184 # The date strings don't matter, so we pass empty strings.
1179 1185 headerlines, hunks = mdiff.unidiff(
1180 1186 left, '', right, '', 'left', 'right', binary=False)
1181 1187 # consume iterators in roughly the way patch.py does
1182 1188 b'\n'.join(headerlines)
1183 1189 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1184 1190 timer, fm = gettimer(ui, opts)
1185 1191 timer(d)
1186 1192 fm.end()
1187 1193
1188 1194 @command('perfdiffwd', formatteropts)
1189 1195 def perfdiffwd(ui, repo, **opts):
1190 1196 """Profile diff of working directory changes"""
1191 1197 timer, fm = gettimer(ui, opts)
1192 1198 options = {
1193 1199 'w': 'ignore_all_space',
1194 1200 'b': 'ignore_space_change',
1195 1201 'B': 'ignore_blank_lines',
1196 1202 }
1197 1203
1198 1204 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1199 1205 opts = dict((options[c], '1') for c in diffopt)
1200 1206 def d():
1201 1207 ui.pushbuffer()
1202 1208 commands.diff(ui, repo, **opts)
1203 1209 ui.popbuffer()
1204 1210 title = 'diffopts: %s' % (diffopt and ('-' + diffopt) or 'none')
1205 1211 timer(d, title)
1206 1212 fm.end()
1207 1213
1208 1214 @command('perfrevlogindex', revlogopts + formatteropts,
1209 1215 '-c|-m|FILE')
1210 1216 def perfrevlogindex(ui, repo, file_=None, **opts):
1211 1217 """Benchmark operations against a revlog index.
1212 1218
1213 1219 This tests constructing a revlog instance, reading index data,
1214 1220 parsing index data, and performing various operations related to
1215 1221 index data.
1216 1222 """
1217 1223
1218 1224 rl = cmdutil.openrevlog(repo, 'perfrevlogindex', file_, opts)
1219 1225
1220 1226 opener = getattr(rl, 'opener') # trick linter
1221 1227 indexfile = rl.indexfile
1222 1228 data = opener.read(indexfile)
1223 1229
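# The first 4 bytes of a revlog index pack the format flags into the high
# 16 bits (e.g. the inline-data flag tested below) and the format version
# into the low 16 bits.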
1224 1230 header = struct.unpack('>I', data[0:4])[0]
1225 1231 version = header & 0xFFFF
1226 1232 if version == 1:
1227 1233 revlogio = revlog.revlogio()
1228 1234 inline = header & (1 << 16)
1229 1235 else:
1230 1236 raise error.Abort(('unsupported revlog version: %d') % version)
1231 1237
1232 1238 rllen = len(rl)
1233 1239
1234 1240 node0 = rl.node(0)
1235 1241 node25 = rl.node(rllen // 4)
1236 1242 node50 = rl.node(rllen // 2)
1237 1243 node75 = rl.node(rllen // 4 * 3)
1238 1244 node100 = rl.node(rllen - 1)
1239 1245
1240 1246 allrevs = range(rllen)
1241 1247 allrevsrev = list(reversed(allrevs))
1242 1248 allnodes = [rl.node(rev) for rev in range(rllen)]
1243 1249 allnodesrev = list(reversed(allnodes))
1244 1250
1245 1251 def constructor():
1246 1252 revlog.revlog(opener, indexfile)
1247 1253
1248 1254 def read():
1249 1255 with opener(indexfile) as fh:
1250 1256 fh.read()
1251 1257
1252 1258 def parseindex():
1253 1259 revlogio.parseindex(data, inline)
1254 1260
1255 1261 def getentry(revornode):
1256 1262 index = revlogio.parseindex(data, inline)[0]
1257 1263 index[revornode]
1258 1264
1259 1265 def getentries(revs, count=1):
1260 1266 index = revlogio.parseindex(data, inline)[0]
1261 1267
1262 1268 for i in range(count):
1263 1269 for rev in revs:
1264 1270 index[rev]
1265 1271
1266 1272 def resolvenode(node):
1267 1273 nodemap = revlogio.parseindex(data, inline)[1]
1268 1274 # This only works for the C code.
1269 1275 if nodemap is None:
1270 1276 return
1271 1277
1272 1278 try:
1273 1279 nodemap[node]
1274 1280 except error.RevlogError:
1275 1281 pass
1276 1282
1277 1283 def resolvenodes(nodes, count=1):
1278 1284 nodemap = revlogio.parseindex(data, inline)[1]
1279 1285 if nodemap is None:
1280 1286 return
1281 1287
1282 1288 for i in range(count):
1283 1289 for node in nodes:
1284 1290 try:
1285 1291 nodemap[node]
1286 1292 except error.RevlogError:
1287 1293 pass
1288 1294
1289 1295 benches = [
1290 1296 (constructor, 'revlog constructor'),
1291 1297 (read, 'read'),
1292 1298 (parseindex, 'create index object'),
1293 1299 (lambda: getentry(0), 'retrieve index entry for rev 0'),
1294 1300 (lambda: resolvenode('a' * 20), 'look up missing node'),
1295 1301 (lambda: resolvenode(node0), 'look up node at rev 0'),
1296 1302 (lambda: resolvenode(node25), 'look up node at 1/4 len'),
1297 1303 (lambda: resolvenode(node50), 'look up node at 1/2 len'),
1298 1304 (lambda: resolvenode(node75), 'look up node at 3/4 len'),
1299 1305 (lambda: resolvenode(node100), 'look up node at tip'),
1300 1306 # 2x variation is to measure caching impact.
1301 1307 (lambda: resolvenodes(allnodes),
1302 1308 'look up all nodes (forward)'),
1303 1309 (lambda: resolvenodes(allnodes, 2),
1304 1310 'look up all nodes 2x (forward)'),
1305 1311 (lambda: resolvenodes(allnodesrev),
1306 1312 'look up all nodes (reverse)'),
1307 1313 (lambda: resolvenodes(allnodesrev, 2),
1308 1314 'look up all nodes 2x (reverse)'),
1309 1315 (lambda: getentries(allrevs),
1310 1316 'retrieve all index entries (forward)'),
1311 1317 (lambda: getentries(allrevs, 2),
1312 1318 'retrieve all index entries 2x (forward)'),
1313 1319 (lambda: getentries(allrevsrev),
1314 1320 'retrieve all index entries (reverse)'),
1315 1321 (lambda: getentries(allrevsrev, 2),
1316 1322 'retrieve all index entries 2x (reverse)'),
1317 1323 ]
1318 1324
1319 1325 for fn, title in benches:
1320 1326 timer, fm = gettimer(ui, opts)
1321 1327 timer(fn, title=title)
1322 1328 fm.end()
1323 1329
1324 1330 @command('perfrevlogrevisions', revlogopts + formatteropts +
1325 1331 [('d', 'dist', 100, 'distance between the revisions'),
1326 1332 ('s', 'startrev', 0, 'revision to start reading at'),
1327 1333 ('', 'reverse', False, 'read in reverse')],
1328 1334 '-c|-m|FILE')
1329 1335 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1330 1336 **opts):
1331 1337 """Benchmark reading a series of revisions from a revlog.
1332 1338
1333 1339 By default, we read every ``-d/--dist`` revision from 0 to tip of
1334 1340 the specified revlog.
1335 1341
1336 1342 The start revision can be defined via ``-s/--startrev``.
1337 1343 """
1338 1344 rl = cmdutil.openrevlog(repo, 'perfrevlogrevisions', file_, opts)
1339 1345 rllen = getlen(ui)(rl)
1340 1346
1341 1347 def d():
1342 1348 rl.clearcaches()
1343 1349
1344 1350 beginrev = startrev
1345 1351 endrev = rllen
1346 1352 dist = opts['dist']
1347 1353
1348 1354 if reverse:
1349 1355 beginrev, endrev = endrev, beginrev
1350 1356 dist = -1 * dist
1351 1357
1352 1358 for x in xrange(beginrev, endrev, dist):
1353 1359 # Old revisions don't support passing int.
1354 1360 n = rl.node(x)
1355 1361 rl.revision(n)
1356 1362
1357 1363 timer, fm = gettimer(ui, opts)
1358 1364 timer(d)
1359 1365 fm.end()
1360 1366
1361 1367 @command('perfrevlogchunks', revlogopts + formatteropts +
1362 1368 [('e', 'engines', '', 'compression engines to use'),
1363 1369 ('s', 'startrev', 0, 'revision to start at')],
1364 1370 '-c|-m|FILE')
1365 1371 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
1366 1372 """Benchmark operations on revlog chunks.
1367 1373
1368 1374 Logically, each revlog is a collection of fulltext revisions. However,
1369 1375 stored within each revlog are "chunks" of possibly compressed data. This
1370 1376 data needs to be read and decompressed or compressed and written.
1371 1377
1372 1378 This command measures the time it takes to read+decompress and recompress
1373 1379 chunks in a revlog. It effectively isolates I/O and compression performance.
1374 1380 For measurements of higher-level operations like resolving revisions,
1375 1381 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
1376 1382 """
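# Hypothetical invocation: hg perfrevlogchunks -c -s 1000
# (use -e/--engines to restrict which compression engines are benchmarked)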
1377 1383 rl = cmdutil.openrevlog(repo, 'perfrevlogchunks', file_, opts)
1378 1384
1379 1385 # _chunkraw was renamed to _getsegmentforrevs.
1380 1386 try:
1381 1387 segmentforrevs = rl._getsegmentforrevs
1382 1388 except AttributeError:
1383 1389 segmentforrevs = rl._chunkraw
1384 1390
1385 1391 # Verify engines argument.
1386 1392 if engines:
1387 1393 engines = set(e.strip() for e in engines.split(','))
1388 1394 for engine in engines:
1389 1395 try:
1390 1396 util.compressionengines[engine]
1391 1397 except KeyError:
1392 1398 raise error.Abort('unknown compression engine: %s' % engine)
1393 1399 else:
1394 1400 engines = []
1395 1401 for e in util.compengines:
1396 1402 engine = util.compengines[e]
1397 1403 try:
1398 1404 if engine.available():
1399 1405 engine.revlogcompressor().compress('dummy')
1400 1406 engines.append(e)
1401 1407 except NotImplementedError:
1402 1408 pass
1403 1409
1404 1410 revs = list(rl.revs(startrev, len(rl) - 1))
1405 1411
1406 1412 def rlfh(rl):
1407 1413 if rl._inline:
1408 1414 return getsvfs(repo)(rl.indexfile)
1409 1415 else:
1410 1416 return getsvfs(repo)(rl.datafile)
1411 1417
1412 1418 def doread():
1413 1419 rl.clearcaches()
1414 1420 for rev in revs:
1415 1421 segmentforrevs(rev, rev)
1416 1422
1417 1423 def doreadcachedfh():
1418 1424 rl.clearcaches()
1419 1425 fh = rlfh(rl)
1420 1426 for rev in revs:
1421 1427 segmentforrevs(rev, rev, df=fh)
1422 1428
1423 1429 def doreadbatch():
1424 1430 rl.clearcaches()
1425 1431 segmentforrevs(revs[0], revs[-1])
1426 1432
1427 1433 def doreadbatchcachedfh():
1428 1434 rl.clearcaches()
1429 1435 fh = rlfh(rl)
1430 1436 segmentforrevs(revs[0], revs[-1], df=fh)
1431 1437
1432 1438 def dochunk():
1433 1439 rl.clearcaches()
1434 1440 fh = rlfh(rl)
1435 1441 for rev in revs:
1436 1442 rl._chunk(rev, df=fh)
1437 1443
1438 1444 chunks = [None]
1439 1445
1440 1446 def dochunkbatch():
1441 1447 rl.clearcaches()
1442 1448 fh = rlfh(rl)
1443 1449 # Save chunks as a side-effect.
1444 1450 chunks[0] = rl._chunks(revs, df=fh)
1445 1451
1446 1452 def docompress(compressor):
1447 1453 rl.clearcaches()
1448 1454
1449 1455 try:
1450 1456 # Swap in the requested compression engine.
1451 1457 oldcompressor = rl._compressor
1452 1458 rl._compressor = compressor
1453 1459 for chunk in chunks[0]:
1454 1460 rl.compress(chunk)
1455 1461 finally:
1456 1462 rl._compressor = oldcompressor
1457 1463
1458 1464 benches = [
1459 1465 (lambda: doread(), 'read'),
1460 1466 (lambda: doreadcachedfh(), 'read w/ reused fd'),
1461 1467 (lambda: doreadbatch(), 'read batch'),
1462 1468 (lambda: doreadbatchcachedfh(), 'read batch w/ reused fd'),
1463 1469 (lambda: dochunk(), 'chunk'),
1464 1470 (lambda: dochunkbatch(), 'chunk batch'),
1465 1471 ]
1466 1472
1467 1473 for engine in sorted(engines):
1468 1474 compressor = util.compengines[engine].revlogcompressor()
1469 1475 benches.append((functools.partial(docompress, compressor),
1470 1476 'compress w/ %s' % engine))
1471 1477
1472 1478 for fn, title in benches:
1473 1479 timer, fm = gettimer(ui, opts)
1474 1480 timer(fn, title=title)
1475 1481 fm.end()
1476 1482
1477 1483 @command('perfrevlogrevision', revlogopts + formatteropts +
1478 1484 [('', 'cache', False, 'use caches instead of clearing')],
1479 1485 '-c|-m|FILE REV')
1480 1486 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
1481 1487 """Benchmark obtaining a revlog revision.
1482 1488
1483 1489 Obtaining a revlog revision consists of roughly the following steps:
1484 1490
1485 1491 1. Compute the delta chain
1486 1492 2. Obtain the raw chunks for that delta chain
1487 1493 3. Decompress each raw chunk
1488 1494 4. Apply binary patches to obtain fulltext
1489 1495 5. Verify hash of fulltext
1490 1496
1491 1497 This command measures the time spent in each of these phases.
1492 1498 """
1493 1499 if opts.get('changelog') or opts.get('manifest'):
1494 1500 file_, rev = None, file_
1495 1501 elif rev is None:
1496 1502 raise error.CommandError('perfrevlogrevision', 'invalid arguments')
1497 1503
1498 1504 r = cmdutil.openrevlog(repo, 'perfrevlogrevision', file_, opts)
1499 1505
1500 1506 # _chunkraw was renamed to _getsegmentforrevs.
1501 1507 try:
1502 1508 segmentforrevs = r._getsegmentforrevs
1503 1509 except AttributeError:
1504 1510 segmentforrevs = r._chunkraw
1505 1511
1506 1512 node = r.lookup(rev)
1507 1513 rev = r.rev(node)
1508 1514
1509 1515 def getrawchunks(data, chain):
1510 1516 start = r.start
1511 1517 length = r.length
1512 1518 inline = r._inline
1513 1519 iosize = r._io.size
1514 1520 buffer = util.buffer
1515 1521 offset = start(chain[0])
1516 1522
1517 1523 chunks = []
1518 1524 ladd = chunks.append
1519 1525
1520 1526 for rev in chain:
1521 1527 chunkstart = start(rev)
1522 1528 if inline:
1523 1529 chunkstart += (rev + 1) * iosize
1524 1530 chunklength = length(rev)
1525 1531 ladd(buffer(data, chunkstart - offset, chunklength))
1526 1532
1527 1533 return chunks
1528 1534
1529 1535 def dodeltachain(rev):
1530 1536 if not cache:
1531 1537 r.clearcaches()
1532 1538 r._deltachain(rev)
1533 1539
1534 1540 def doread(chain):
1535 1541 if not cache:
1536 1542 r.clearcaches()
1537 1543 segmentforrevs(chain[0], chain[-1])
1538 1544
1539 1545 def dorawchunks(data, chain):
1540 1546 if not cache:
1541 1547 r.clearcaches()
1542 1548 getrawchunks(data, chain)
1543 1549
1544 1550 def dodecompress(chunks):
1545 1551 decomp = r.decompress
1546 1552 for chunk in chunks:
1547 1553 decomp(chunk)
1548 1554
1549 1555 def dopatch(text, bins):
1550 1556 if not cache:
1551 1557 r.clearcaches()
1552 1558 mdiff.patches(text, bins)
1553 1559
1554 1560 def dohash(text):
1555 1561 if not cache:
1556 1562 r.clearcaches()
1557 1563 r.checkhash(text, node, rev=rev)
1558 1564
1559 1565 def dorevision():
1560 1566 if not cache:
1561 1567 r.clearcaches()
1562 1568 r.revision(node)
1563 1569
1564 1570 chain = r._deltachain(rev)[0]
1565 1571 data = segmentforrevs(chain[0], chain[-1])[1]
1566 1572 rawchunks = getrawchunks(data, chain)
1567 1573 bins = r._chunks(chain)
1568 1574 text = str(bins[0])
1569 1575 bins = bins[1:]
1570 1576 text = mdiff.patches(text, bins)
1571 1577
1572 1578 benches = [
1573 1579 (lambda: dorevision(), 'full'),
1574 1580 (lambda: dodeltachain(rev), 'deltachain'),
1575 1581 (lambda: doread(chain), 'read'),
1576 1582 (lambda: dorawchunks(data, chain), 'rawchunks'),
1577 1583 (lambda: dodecompress(rawchunks), 'decompress'),
1578 1584 (lambda: dopatch(text, bins), 'patch'),
1579 1585 (lambda: dohash(text), 'hash'),
1580 1586 ]
1581 1587
1582 1588 for fn, title in benches:
1583 1589 timer, fm = gettimer(ui, opts)
1584 1590 timer(fn, title=title)
1585 1591 fm.end()
1586 1592
1587 1593 @command('perfrevset',
1588 1594 [('C', 'clear', False, 'clear volatile cache between each call.'),
1589 1595 ('', 'contexts', False, 'obtain changectx for each revision')]
1590 1596 + formatteropts, "REVSET")
1591 1597 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
1592 1598 """benchmark the execution time of a revset
1593 1599
1594 1600 Use the --clear option if you need to evaluate the impact of building the
1595 1601 volatile revision set caches on revset execution. The volatile caches hold
1596 1602 filtering and obsolescence related data."""
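# Hypothetical invocation: hg perfrevset 'draft()' --clear --contexts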
1597 1603 timer, fm = gettimer(ui, opts)
1598 1604 def d():
1599 1605 if clear:
1600 1606 repo.invalidatevolatilesets()
1601 1607 if contexts:
1602 1608 for ctx in repo.set(expr): pass
1603 1609 else:
1604 1610 for r in repo.revs(expr): pass
1605 1611 timer(d)
1606 1612 fm.end()
1607 1613
1608 1614 @command('perfvolatilesets',
1609 1615 [('', 'clear-obsstore', False, 'drop obsstore between each call.'),
1610 1616 ] + formatteropts)
1611 1617 def perfvolatilesets(ui, repo, *names, **opts):
1612 1618 """benchmark the computation of various volatile set
1613 1619
1614 1620 Volatile set computes element related to filtering and obsolescence."""
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def getobs(name):
        def d():
            repo.invalidatevolatilesets()
            if opts['clear_obsstore']:
                clearfilecache(repo, 'obsstore')
            obsolete.getrevs(repo, name)
        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        def d():
            repo.invalidatevolatilesets()
            if opts['clear_obsstore']:
                clearfilecache(repo, 'obsstore')
            repoview.filterrevs(repo, name)
        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()
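
# Example invocation; positional names restrict which sets and filters are
# timed ('obsolete' and 'visible' below are only illustrative):
#
#   $ hg perfvolatilesets obsolete visible --clear-obsstore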

@command('perfbranchmap',
         [('f', 'full', False,
           'Includes build time of subset'),
          ('', 'clear-revbranch', False,
           'purge the revbranch cache between computation'),
         ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with the on-disk cache
    read and write disabled.
    """
    full = opts.get("full", False)
    clear_revbranch = opts.get("clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                view._branchcaches.clear()
            else:
                view._branchcaches.pop(filtername, None)
            view.branchmap()
        return d
    # order the filters from smaller subsets to bigger subsets
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
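    # On each pass, pick a filter whose subset has already been ordered (or
    # that has no subset at all); the for/else only trips the assert when
    # every remaining filter still depends on another remaining one, i.e.
    # when there is a cycle.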
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, 'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or 'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    branchcacheread = safeattrsetter(branchmap, 'read')
    branchcachewrite = safeattrsetter(branchmap.branchcache, 'write')
    branchcacheread.set(lambda repo: None)
    branchcachewrite.set(lambda bc, repo: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = 'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
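
# Example invocation; filter names come from repoview.filtertable (the names
# shown are only illustrative):
#
#   $ hg perfbranchmap visible served --full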

@command('perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)
    timer(lambda: len(obsolete.obsstore(svfs)))
    fm.end()

@command('perflrucachedict', formatteropts +
    [('', 'size', 4, 'size of cache'),
     ('', 'gets', 10000, 'number of key lookups'),
     ('', 'sets', 10000, 'number of key sets'),
     ('', 'mixed', 10000, 'number of mixed mode operations'),
     ('', 'mixedgetfreq', 50, 'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, size=4, gets=10000, sets=10000, mixed=10000,
                 mixedgetfreq=50, **opts):
    def doinit():
        for i in xrange(10000):
            util.lrucachedict(size)

    values = []
    for i in xrange(size):
        values.append(random.randint(0, sys.maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    for i in xrange(sets):
        setseq.append(random.randint(0, sys.maxint))

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append((op, random.randint(0, size * 2)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    benches = [
        (doinit, 'init'),
        (dogets, 'gets'),
        (dosets, 'sets'),
        (domixed, 'mixed')
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
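
# Example invocation (runs without a repository, hence norepo=True above);
# the option values are only illustrative:
#
#   $ hg perflrucachedict --size 4 --gets 10000 --mixedgetfreq 90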

@command('perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    timer, fm = gettimer(ui, opts)
    def write():
        for i in range(100000):
            ui.write(('Testing write performance\n'))
    timer(write)
    fm.end()

def uisetup(ui):
    if (util.safehasattr(cmdutil, 'openrevlog') and
        not util.safehasattr(commands, 'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). The '--dir' option for openrevlog()
        # should therefore fail on versions before 3.5 (or 49c583ca48c4),
        # where the option is not yet available.
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get('dir') and not util.safehasattr(repo, 'dirlog'):
                raise error.Abort("This version doesn't support --dir option",
                                  hint="use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, 'openrevlog', openrevlog)