##// END OF EJS Templates
perf: factor selection of revisions involved in the merge out...
marmoute -
r42575:3a3592b4 default
parent child Browse files
Show More
@@ -1,2924 +1,2926 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
12 12 worst, median average. If not set only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
16 16 number of second to wait before any group of runs (default: 1)
17 17
18 18 ``pre-run``
19 19 number of run to perform before starting measurement.
20 20
21 21 ``profile-benchmark``
22 22 Enable profiling for the benchmarked section.
23 23 (The first iteration is benchmarked)
24 24
25 25 ``run-limits``
26 26 Control the number of runs each benchmark will perform. The option value
27 27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 28 conditions are considered in order with the following logic:
29 29
30 30 If benchmark has been running for <time> seconds, and we have performed
31 31 <numberofrun> iterations, stop the benchmark,
32 32
33 33 The default value is: `3.0-100, 10.0-3`
34 34
35 35 ``stub``
36 36 When set, benchmarks will only be run once, useful for testing
37 37 (default: off)
38 38 '''
39 39
40 40 # "historical portability" policy of perf.py:
41 41 #
42 42 # We have to do:
43 43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 46 # - make historical perf command work correctly with as wide Mercurial
47 47 # version as possible
48 48 #
49 49 # We have to do, if possible with reasonable cost:
50 50 # - make recent perf command for historical feature work correctly
51 51 # with early Mercurial
52 52 #
53 53 # We don't have to do:
54 54 # - make perf command for recent feature work correctly with early
55 55 # Mercurial
56 56
57 57 from __future__ import absolute_import
58 58 import contextlib
59 59 import functools
60 60 import gc
61 61 import os
62 62 import random
63 63 import shutil
64 64 import struct
65 65 import sys
66 66 import tempfile
67 67 import threading
68 68 import time
69 69 from mercurial import (
70 70 changegroup,
71 71 cmdutil,
72 72 commands,
73 73 copies,
74 74 error,
75 75 extensions,
76 76 hg,
77 77 mdiff,
78 78 merge,
79 79 revlog,
80 80 util,
81 81 )
82 82
83 83 # for "historical portability":
84 84 # try to import modules separately (in dict order), and ignore
85 85 # failure, because these aren't available with early Mercurial
86 86 try:
87 87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 88 except ImportError:
89 89 pass
90 90 try:
91 91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 92 except ImportError:
93 93 pass
94 94 try:
95 95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 96 dir(registrar) # forcibly load it
97 97 except ImportError:
98 98 registrar = None
99 99 try:
100 100 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 101 except ImportError:
102 102 pass
103 103 try:
104 104 from mercurial.utils import repoviewutil # since 5.0
105 105 except ImportError:
106 106 repoviewutil = None
107 107 try:
108 108 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 109 except ImportError:
110 110 pass
111 111 try:
112 112 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 113 except ImportError:
114 114 pass
115 115
116 116 try:
117 117 from mercurial import profiling
118 118 except ImportError:
119 119 profiling = None
120 120
def identity(a):
    """Return *a* unchanged; fallback when no pycompat conversion exists."""
    return a
123 123
124 124 try:
125 125 from mercurial import pycompat
126 126 getargspec = pycompat.getargspec # added to module after 4.5
127 127 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
128 128 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
129 129 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
130 130 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
131 131 if pycompat.ispy3:
132 132 _maxint = sys.maxsize # per py3 docs for replacing maxint
133 133 else:
134 134 _maxint = sys.maxint
135 135 except (ImportError, AttributeError):
136 136 import inspect
137 137 getargspec = inspect.getargspec
138 138 _byteskwargs = identity
139 139 fsencode = identity # no py3 support
140 140 _maxint = sys.maxint # no py3 support
141 141 _sysstr = lambda x: x # no py3 support
142 142 _xrange = xrange
143 143
144 144 try:
145 145 # 4.7+
146 146 queue = pycompat.queue.Queue
147 147 except (AttributeError, ImportError):
148 148 # <4.7.
149 149 try:
150 150 queue = pycompat.queue
151 151 except (AttributeError, ImportError):
152 152 queue = util.queue
153 153
154 154 try:
155 155 from mercurial import logcmdutil
156 156 makelogtemplater = logcmdutil.maketemplater
157 157 except (AttributeError, ImportError):
158 158 try:
159 159 makelogtemplater = cmdutil.makelogtemplater
160 160 except (AttributeError, ImportError):
161 161 makelogtemplater = None
162 162
163 163 # for "historical portability":
164 164 # define util.safehasattr forcibly, because util.safehasattr has been
165 165 # available since 1.9.3 (or 94b200a11cf7)
_undefined = object()

def safehasattr(thing, attr):
    """Report whether *thing* has attribute *attr* (name given as bytes).

    A private sentinel is used so that attributes whose value is falsy
    (None, 0, b'') are still reported as present.
    """
    missing = _undefined
    return getattr(thing, _sysstr(attr), missing) is not missing
169 169 setattr(util, 'safehasattr', safehasattr)
170 170
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    # Python >= 3.3: monotonic, highest resolution available
    util.timer = time.perf_counter
elif os.name == b'nt':
    # NOTE(review): os.name is a native str, so this bytes comparison can
    # only ever match on Python 2; harmless on Python 3, which always has
    # time.perf_counter and takes the branch above -- confirm intended.
    util.timer = time.clock
else:
    # time.time has coarse resolution but works everywhere
    util.timer = time.time
180 180
181 181 # for "historical portability":
182 182 # use locally defined empty option list, if formatteropts isn't
183 183 # available, because commands.formatteropts has been available since
184 184 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
185 185 # available since 2.2 (or ae5f92e154d3)
186 186 formatteropts = getattr(cmdutil, "formatteropts",
187 187 getattr(commands, "formatteropts", []))
188 188
189 189 # for "historical portability":
190 190 # use locally defined option list, if debugrevlogopts isn't available,
191 191 # because commands.debugrevlogopts has been available since 3.7 (or
192 192 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
193 193 # since 1.9 (or a79fea6b3e77).
194 194 revlogopts = getattr(cmdutil, "debugrevlogopts",
195 195 getattr(commands, "debugrevlogopts", [
196 196 (b'c', b'changelog', False, (b'open changelog')),
197 197 (b'm', b'manifest', False, (b'open manifest')),
198 198 (b'', b'dir', False, (b'open directory manifest')),
199 199 ]))
200 200
201 201 cmdtable = {}
202 202
203 203 # for "historical portability":
204 204 # define parsealiases locally, because cmdutil.parsealiases has been
205 205 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Return the list of aliases in a b"name|alias1|alias2" declaration."""
    return cmd.split(b"|")
208 208
# pick the most capable @command decorator implementation this Mercurial
# provides, wrapping or re-implementing it for older versions
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
236 236
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    # register the perf.* knobs documented in the module docstring; all use
    # dynamicdefault because the reading code supplies its own fallbacks
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'pre-run',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'profile-benchmark',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'run-limits',
               default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    # early Mercurial has no config registration; the knobs still work,
    # just unregistered
    pass
265 265
def getlen(ui):
    """Return the length function benchmarks should use.

    In perf.stub testing mode every collection pretends to have one
    element so test output stays stable; otherwise this is plain len().
    """
    stub = ui.configbool(b"perf", b"stub", False)
    if stub:
        return lambda seq: 1
    return len
270 270
class noop(object):
    """A context manager that does nothing on entry or exit."""

    def __enter__(self):
        return None

    def __exit__(self, *args):
        return None
277 277
278 278 NOOPCTX = noop()
279 279
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # entries look like b'3.0-100': <seconds>-<minimum run count>
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n'
                     % item))
            continue
        try:
            time_limit = float(pycompat.sysstr(parts[0]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (pycompat.bytestr(e), item)))
            continue
        try:
            run_limit = int(pycompat.sysstr(parts[1]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (pycompat.bytestr(e), item)))
            continue
        limits.append((time_limit, run_limit))
    # malformed or empty config falls back to the built-in stop conditions
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
                          prerun=prerun, profiler=profiler)
    return t, fm
379 379
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* exactly once (after optional *setup*), without timing.

    Used in place of _timer when the perf.stub config knob is set,
    e.g. by the test suite.
    """
    if setup is not None:
        setup()
    func()
384 384
@contextlib.contextmanager
def timeone():
    """Measure one run of the managed block; yields a one-item result list.

    On exit the list receives a (wallclock, user-cpu, system-cpu) tuple
    covering the time spent inside the with-block.
    """
    result = []
    oldtimes = os.times()
    start = util.timer()
    yield result
    stop = util.timer()
    newtimes = os.times()
    result.append((stop - start,
                   newtimes[0] - oldtimes[0],
                   newtimes[1] - oldtimes[1]))
395 395
396 396
# list of stop condition (elapsed time, minimal run count)
# A benchmark may stop once it has been running for at least <elapsed time>
# seconds AND has performed at least <minimal run count> iterations; the
# conditions are checked in order by _timer().
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
402 402
def _timer(fm, func, setup=None, title=None, displayall=False,
           limits=DEFAULTLIMITS, prerun=0, profiler=None):
    """Repeatedly run and time *func*, then report through formatter *fm*.

    *setup*, if not None, is called before every run including warmups.
    *prerun* warmup runs are executed (and not measured) first.
    *limits* is a sequence of (seconds, mincount) stop conditions; the
    loop stops once any condition has both its elapsed time and its run
    count satisfied. Only the first measured run executes under
    *profiler* (if given); later runs use the no-op context.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # warmup runs: executed but never recorded
    for i in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # only the first iteration is profiled
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    # r holds the last run's return value; shown as "! result: ..." if truthy
    formatone(fm, results, title=title, result=r,
              displayall=displayall)
435 435
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timing entries through formatter *fm*.

    *timings* is a list of (wall, user, sys) tuples; it is sorted in
    place. The best run is always reported; with *displayall* the max,
    average and median runs are reported as well.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        # only the "best" entry is shown without a role prefix on its fields
        prefix = b'' if role == b'best' else b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    display(b'best', timings[0])
    if displayall:
        display(b'max', timings[-1])
        display(b'avg', tuple([sum(x) / count for x in zip(*timings)]))
        display(b'median', timings[len(timings) // 2])
467 467
468 468 # utilities for historical portability
469 469
def getint(ui, section, name, default):
    """Read config *section*.*name* as an integer, or *default* if unset.

    Implemented on top of plain ui.config() because ui.configint has
    only been available since 1.9 (or fa2b596db182).

    Raises error.ConfigError when the value is set but not an integer.
    """
    value = ui.config(section, name, None)
    if value is None:
        return default
    try:
        return int(value)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, value))
481 481
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure *obj* has attribute *name*, then return a set/restore helper.

    Aborts when the attribute is missing so that a future removal of an
    attribute relied on by performance measurement is noticed instead of
    silently skewing results.

    The returned helper exposes set(newvalue) to assign a new value and
    restore() to put the original value back.

    With ignoremissing=True a missing attribute makes this return None
    instead of aborting, which is useful for probing attributes that only
    exist in some Mercurial versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    saved = getattr(obj, _sysstr(name))

    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), saved)

    return attrutil()
511 511
512 512 # utilities to examine each internal API changes
513 513
def getbranchmapsubsettable():
    """Return the subsettable mapping from whichever module defines it.

    For "historical portability", subsettable has lived in:
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    - repoviewutil since 5.0
    """
    for mod in (branchmap, repoview, repoviewutil):
        table = getattr(mod, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
530 530
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store."""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf);
    # fall back to the older repo.sopener otherwise
    svfs = getattr(repo, 'svfs', None)
    return svfs if svfs else getattr(repo, 'sopener')
541 541
def getvfs(repo):
    """Return appropriate object to access files under .hg."""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf);
    # fall back to the older repo.opener otherwise
    vfs = getattr(repo, 'vfs', None)
    return vfs if vfs else getattr(repo, 'opener')
552 552
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            # NOTE(review): vars() keys are native str on Python 3, so this
            # bytes membership test looks py2-oriented -- confirm the cache
            # actually gets cleared on py3.
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
581 581
582 582 # utilities to clear cache
583 583
def clearfilecache(obj, attrname):
    """Drop *attrname* from *obj*'s filecache-backed cached properties.

    Operates on the unfiltered repo when *obj* is a repoview, since that
    is where the cached value actually lives.
    """
    getunfiltered = getattr(obj, 'unfiltered', None)
    if getunfiltered is not None:
        obj = getunfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
591 591
def clearchangelog(repo):
    """Invalidate cached changelog data, including on the unfiltered repo."""
    if repo is not repo.unfiltered():
        # repoview: also drop the view-level changelog cache slots
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
597 597
598 598 # perf commands
599 599
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the dirstate for matched files"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    def walk():
        files = repo.dirstate.walk(m, subrepos=[], unknown=True,
                                   ignored=False)
        return len(list(files))
    timer(walk)
    fm.end()
608 608
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating one file at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    def annotate():
        return len(fc.annotate(True))
    timer(annotate)
    fm.end()
616 616
@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    """benchmark computing the working directory status"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    unknown = opts[b'unknown']
    def status():
        return sum(map(len, repo.status(unknown=unknown)))
    timer(status)
    fm.end()
628 628
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the working directory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Read the old quiet level *before* entering the try block: if the
    # assignment itself sat inside "try" and failed, the finally clause
    # would raise NameError on oldquiet, masking the original error.
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            # newer addremove (5.0+) takes a uipathfn argument
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
646 646
def clearcaches(cl):
    """Clear a changelog/revlog's lookup caches across internal API changes."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # pre-clearcaches() revlogs: reset the node cache by hand
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
655 655
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def setup():
        clearcaches(cl)
    def run():
        len(cl.headrevs())
    timer(run, setup=setup)
    fm.end()
668 668
@command(b'perftags', formatteropts+
        [
         (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
        ])
def perftags(ui, repo, **opts):
    """benchmark reading the tags of a repository"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()
    def bench():
        return len(repo.tags())
    timer(bench, setup=setup)
    fm.end()
687 687
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating all ancestors of the changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def walk():
        for _rev in repo.changelog.ancestors(heads):
            pass
    timer(walk)
    fm.end()
698 698
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests against a lazy ancestor set"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def check():
        ancestors = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in ancestors
    timer(check)
    fm.end()
711 711
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # Every other perf command byte-ifies **opts before handing it to
    # gettimer(); without this, formatter options arrive under native-str
    # keys and are silently ignored on Python 3.
    opts = _byteskwargs(opts)
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # a fresh peer per run, so no discovery state is reused
        repos[1] = hg.peer(ui, opts, path)
    def d():
        setdiscovery.findcommonheads(ui, *repos)
    timer(d, setup=s)
    fm.end()
726 726
@command(b'perfbookmarks', formatteropts +
        [
         (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
        ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')
    def bench():
        repo._bookmarks
    timer(bench, setup=setup)
    fm.end()
745 745
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    # imported here (not at module level) for "historical portability"
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # NOTE(review): the bytes open() mode b'rb' used below works on
    # Python 2, but Python 3's builtin open() expects a str mode --
    # confirm which open() is in effect here.

    # benchmark factory: reopen the bundle, then run fn on the unbundler
    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    # benchmark factory: read the whole unbundler in <size>-byte chunks
    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    # benchmark factory: raw file reads, bypassing bundle parsing entirely
    def makestdioread(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    # benchmark factory: read each bundle2 part in <size>-byte chunks
    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    # raw-read benches apply to every bundle type
    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # sniff the bundle type once to pick the matching benches
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
863 863
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def produce():
        state, chunks = bundler._generatechangelog(cl, nodes)
        # exhaust the generator to force the actual work
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(produce)

    fm.end()
894 894
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark rebuilding the dirstate directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate before measuring
    b'a' in dirstate
    def rebuild():
        dirstate.hasdir(b'a')
        del dirstate._map._dirs
    timer(rebuild)
    fm.end()
906 906
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark re-reading the dirstate after invalidation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate before measuring
    b"a" in repo.dirstate
    def reload():
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(reload)
    fm.end()
917 917
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark rebuilding the dirstate's directory structure"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate before measuring
    b"a" in repo.dirstate
    def rebuild():
        repo.dirstate.hasdir(b"a")
        del repo.dirstate._map._dirs
    timer(rebuild)
    fm.end()
928 928
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark building the dirstate file fold map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # load the dirstate before the timed runs
    b'a' in dirstate

    def run():
        dirstate._map.filefoldmap.get(b'a')
        # drop the cached map so every iteration recomputes it
        del dirstate._map.filefoldmap

    timer(run)
    fm.end()
940 940
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark building the dirstate directory fold map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # load the dirstate before the timed runs
    b'a' in dirstate

    def run():
        dirstate._map.dirfoldmap.get(b'a')
        # drop both cached attributes so the next call starts cold
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs

    timer(run)
    fm.end()
953 953
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark writing the dirstate back to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # force the dirstate to be read before timing
    b"a" in dirstate

    def run():
        # mark it dirty so write() actually serializes the content
        dirstate._dirty = True
        dirstate.write(repo.currenttransaction())

    timer(run)
    fm.end()
965 965
def _getmergerevs(repo, opts):
    """parse command arguments to return the revisions involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts['from']:
        fromrev = scmutil.revsingle(repo, opts['from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts['rev'], opts['rev'])
    if opts['base']:
        fromrev = scmutil.revsingle(repo, opts['base'])
        ancestor = repo[fromrev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
987
@command(b'perfmergecalculate',
         [
          (b'r', b'rev', b'.', b'rev to merge against'),
          (b'', b'from', b'', b'rev to merge from'),
          (b'', b'base', b'', b'the revision to use as base'),
         ] + formatteropts)
def perfmergecalculate(ui, repo, **opts):
    """benchmark the computation of the actions needed for a merge"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # resolve all involved revisions outside of the timed section
    wctx, rctx, ancestor = _getmergerevs(repo, opts)
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()
1004 1006
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve both revisions before the timed section
    fromctx = scmutil.revsingle(repo, rev1, rev1)
    toctx = scmutil.revsingle(repo, rev2, rev2)

    def run():
        copies.pathcopies(fromctx, toctx)

    timer(run)
    fm.end()
1016 1018
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
          ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # grab the cache object once; the timed run invalidates and reloads it
    _phases = repo._phasecache
    full = opts.get(b'full')
    def d():
        phases = _phases
        if full:
            # with --full, also drop the filecache entry so re-reading the
            # phase data is part of the measurement
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()
1035 1037
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    # imported here (not at module level) so perf.py stays loadable with
    # Mercurial versions that predate some of these modules
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    # fetch the raw phase information from the remote before timing
    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    # count the remote phase roots that are known locally and non-public,
    # to give context about the size of the input being benchmarked
    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
1091 1093
@command(b'perfmanifest',[
            (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
            (b'', b'clear-disk', False, b'clear on-disk caches too'),
         ] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset: use its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hex node of a manifest revision
            t = bin(rev)
        else:
            # otherwise REV must be an integer manifest revision number
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    # compatibility with older Mercurial versions
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        # clear caches each run so reading from disk is what gets timed
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
1127 1129
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changeset entry from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def run():
        repo.changelog.read(node)

    timer(run)
    fm.end()
1138 1140
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def prepare():
        # drop both the parsed dirstate and the cached ignore matcher so
        # the timed run rebuilds everything from scratch
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def load():
        dirstate._ignore

    timer(load, setup=prepare, title=b"load")
    fm.end()
1155 1157
@command(b'perfindex', [
            (b'', b'rev', [], b'revision to be looked up (default tip)'),
            (b'', b'no-lookup', None, b'do not revision lookup post creation'),
         ] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matter. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matter.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, check out the `perfnodemap` command."""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    if opts[b'no_lookup']:
        if opts['rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        # default: look up the tip node only
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        # rebuild the changelog (and hence the index), then look up nodes
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)
    timer(d, setup=setup)
    fm.end()
1209 1211
@command(b'perfnodemap', [
            (b'', b'rev', [], b'revision to be looked up (default tip)'),
            (b'', b'clear-caches', True, b'clear revlog cache between calls'),
         ] + formatteropts)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can vary. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focuses on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts['clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort('use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    # resolve the nodes up front; only the nodemap lookup is timed
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]
    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        def setup():
            setnodeget()
    else:
        setnodeget()
        d() # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1268 1270
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark starting a fresh `hg version` process"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    if os.name != r'nt':
        # build the command line once; the shell invocation is what is timed
        cmd = b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])

        def run():
            os.system(cmd)
    else:
        def run():
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])

    timer(run)
    fm.end()
1282 1284
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    # resolve the nodes up front so only the parents() calls are timed
    nodes = [repo.changelog.node(i) for i in _xrange(count)]

    def run():
        for node in nodes:
            repo.changelog.parents(node)

    timer(run)
    fm.end()
1306 1308
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the files touched by a changeset"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)

    def run():
        len(repo[rev].files())

    timer(run)
    fm.end()
1316 1318
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the file list straight from the changelog"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    changelog = repo.changelog

    def run():
        # index 3 of the parsed entry is what this command reports the
        # length of (the files field, per the command name)
        len(changelog.read(rev)[3])

    timer(run)
    fm.end()
1327 1329
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision symbol to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        return len(repo.lookup(rev))

    timer(run)
    fm.end()
1334 1336
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
          ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark applying a long series of random edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: every run replays the same pseudo-random edit script
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        # replace range [a1, a2) of the current content with [b1, b2)
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        # only the replay of the pre-computed edits is timed
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1368 1370
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving revset specifications to revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # bind the function locally so attribute lookup is not part of the timing
    revrange = scmutil.revrange

    def run():
        return len(revrange(repo, specs))

    timer(run)
    fm.end()
1376 1378
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark looking a node up in a revlog whose caches are cleared"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    # open the changelog directly so the revlog-level caches can be cleared
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(n)
        # drop the revlog caches so each iteration starts cold
        clearcaches(cl)
    timer(d)
    fm.end()
1390 1392
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running `hg log`, optionally following renames"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # capture output so terminal printing does not pollute the timing
    ui.pushbuffer()

    def run():
        commands.log(ui, repo, rev=rev, date=b'', user=b'',
                     copies=opts.get(b'rename'))

    timer(run)
    ui.popbuffer()
    fm.end()
1404 1406
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def walkback():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            # branch() forces the changelog entry to be read, not just
            # the index
            repo[rev].branch()

    timer(walkback)
    fm.end()
1419 1421
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
          ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render into a throw-away ui writing to /dev/null so terminal and
    # pager costs do not pollute the measurement
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        # render every requested revision once per timed run
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1453 1455
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
          ])
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    # with --timing two extra columns (rename count and elapsed time) are
    # added to the report
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # only merge commits are of interest: copy tracing is exercised between
    # each merge parent and each common ancestor head
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
1529 1531
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(run)
    fm.end()
1536 1538
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    def run():
        store.fncache._load()

    timer(run)
    fm.end()
1546 1548
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache file"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    # the write happens inside a real lock + transaction so the original
    # content can be restored afterwards
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')
    def d():
        # force the cache to consider itself modified so write() does work
        s.fncache._dirty = True
        s.fncache.write(tr)
    timer(d)
    tr.close()
    lock.release()
    fm.end()
1563 1565
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path stored in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    # make sure the cache content is loaded before timing
    store.fncache._load()

    def run():
        for path in store.fncache.entries:
            store.encode(path)

    timer(run)
    fm.end()
1575 1577
def _bdiffworker(q, blocks, xdiff, ready, done):
    """consume diff jobs from queue ``q`` until ``done`` is set

    Worker thread used by `perfbdiff --threads`.  Each job is a pair of
    texts; ``blocks``/``xdiff`` select which diff routine is exercised.
    A ``None`` job marks the end of a batch: the worker then parks on the
    ``ready`` condition until the main thread starts the next batch.
    """
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()
1591 1593
def _manifestrevision(repo, mnode):
    """Return the raw stored revision text for manifest node ``mnode``."""
    manifestlog = repo.manifestlog

    # modern Mercurial exposes getstorage(); older versions only have the
    # private _revlog attribute
    if util.safehasattr(manifestlog, b'getstorage'):
        return manifestlog.getstorage(b'').revision(mnode)
    return manifestlog._revlog.revision(mnode)
1601 1603
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    # all text pairs are collected up front so only diffing is timed
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        # threaded mode: start the workers and let them park on the
        # `ready` condition before the first timed run (see _bdiffworker)
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            # feed one batch of work plus one None sentinel per worker,
            # wake everyone up, then wait for the batch to drain
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # signal termination and wake the workers one last time so the
        # threads can exit cleanly
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1702 1704
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    # all text pairs are collected up front so only diffing is timed
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1768 1770
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # map each single-letter diff flag to the diff option it enables
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    # time the diff once per whitespace-option combination
    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        # NOTE: rebinds `opts`, shadowing the command options parsed above
        opts = dict((options[c], b'1') for c in diffopt)
        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()
        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()
1790 1792
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener') # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # the first 4 bytes of the index hold the header: format flags in the
    # high bits, version number in the low 16 bits
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    # sample nodes at several positions of the revlog to benchmark lookups
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        # cost of instantiating a revlog object only
        revlog.revlog(opener, indexfile)

    def read():
        # raw I/O cost of reading the whole index file
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1908 1910
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts backward from the end
    if startrev < 0:
        startrev = rllen + startrev

    def bench():
        rl.clearcaches()

        step = opts[b'dist']
        if reverse:
            # walk from the tip down to startrev (inclusive)
            first, last, step = rllen - 1, startrev - 1, -step
        else:
            first, last = startrev, rllen

        for pos in _xrange(first, last, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(pos))

    timer, fm = gettimer(ui, opts)
    timer(bench)
    fm.end()
1950 1952
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
         ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative boundaries count backward from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    # transpose: `allresults` is one [(rev, time)] list per run; build one
    # (rev, [time-run-1, time-run-2, ...]) entry per revision instead
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # BUG FIX: the median entry was previously computed with `* 70`,
        # reporting the 70th percentile under the "50%" label
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
2058 2060
class _faketr(object):
    # Minimal transaction stand-in: it only provides the `add` method,
    # which is the sole transaction interaction the benchmarked write path
    # appears to need (avoids real journaling overhead in the timings —
    # NOTE(review): confirm no other tr attribute is reached).
    def add(s, x, y, z=None):
        return None
2062 2064
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    """Re-add revisions [startrev, stoprev] of `orig` to a throwaway copy,
    timing each individual `addrawrevision` call.

    Returns a list of (rev, timing) pairs, one per revision added, where
    `timing` is the value captured by the `timeone` context manager.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            def updateprogress(pos):
                progress.update(pos)
            def completeprogress():
                progress.complete()
        else:
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            # build the (args, kwargs) for addrawrevision outside the timed
            # section so only the write itself is measured
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
2099 2101
def _getrevisionseed(orig, rev, tr, source):
    """Compute the (args, kwargs) to pass to `addrawrevision` for `rev`.

    Depending on `source`, the revision content is provided either as a
    full text (`full`) or as a cached delta against a chosen base:
    first parent, second parent, smallest parent delta, or the delta
    already stored in the original revlog.
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        parent = p2
        if p2 == nullid:
            # no second parent: fall back to the first one
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # compare both parent deltas and keep the shorter one
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        # reuse the delta base recorded in the original revlog
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2138 2140
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a writable copy of revlog `orig` truncated before `truncaterev`.

    The index and data files are copied into a temporary directory and
    truncated so that revisions from `truncaterev` onward can be appended
    again.  The temporary directory is removed when the context exits.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # each index entry has a fixed size, so the cut offset is a
            # simple multiplication
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
2185 2187
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # no explicit engine requested: probe every registered engine and
        # keep those that are available and can actually compress
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # open a file handle on whichever file holds the revision data
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        # read each segment individually, without a reused file handle
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # same as doread, but reusing a single file handle
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # read all segments in one call
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        # read and decompress each revision chunk one by one
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    # one-element list so dochunkbatch can publish its result for the
    # compression benchmarks below
    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2303 2305
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the positional argument is actually the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        """slice each read segment into per-revision compressed chunks"""
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # account for index entries interleaved with the data
                    # in inline revlogs
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        # older versions kept the helper on the revlog module itself
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute every intermediate artifact once, so each benchmark can
    # time a single phase in isolation
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2439 2441
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clean option if need to evaluate the impact of build volatile
    revisions set cache on the revset execution. Volatile cache hold filtered
    and obsolete related cache."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def bench():
        if clear:
            repo.invalidatevolatilesets()
        # pick the iterator to exhaust: changectx objects when --contexts
        # is set, plain revision numbers otherwise
        if contexts:
            walker = repo.set(expr)
        else:
            walker = repo.revs(expr)
        for _item in walker:
            pass

    timer(bench)
    fm.end()
2462 2464
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def makebench(func, name):
        # every run starts from scratch: volatile sets are invalidated and,
        # when requested, the obsstore file cache is dropped as well
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            func(repo, name)
        return d

    # obsolescence-related sets first
    wantedobs = sorted(obsolete.cachefuncs)
    if names:
        wantedobs = [n for n in wantedobs if n in names]
    for name in wantedobs:
        timer(makebench(obsolete.getrevs, name), title=name)

    # then the repoview filter sets
    wantedfilters = sorted(repoview.filtertable)
    if names:
        wantedfilters = [n for n in wantedfilters if n in names]
    for name in wantedfilters:
        timer(makebench(repoview.filterrevs, name), title=name)
    fm.end()
2504 2506
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
          ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                view._branchcaches.clear()
            else:
                # drop only this filter's cache so branchmap() rebuilds it
                filtered.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    # order filters so each one's subset (if any) is processed before it
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk cache reads and writes so only the in-memory
    # branchmap computation itself gets timed
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2583 2585
@command(b'perfbranchmapupdate', [
    (b'', b'base', [], b'subset of revision to start from'),
    (b'', b'target', [], b'subset of revision to end with'),
    (b'', b'clear-caches', False, b'clear cache between each runs')
] + formatteropts)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None] # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # revisions the benchmarked update will have to process
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # register temporary repoview filters exposing exactly the base and
        # target revision sets; removed again in the finally block
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset where found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # each run starts from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2687 2689
@command(b'perfbranchmapload', [
    (b'f', b'filter', b'', b'Specify repoview filter'),
    # fix: help text previously read "brachmap"
    (b'', b'list', False, b'List branchmap filter caches'),
    (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap

    With --list, only enumerate the on-disk branchmap cache files with
    their sizes and return.  Otherwise, time loading the branchmap cache
    for the requested filter level (unfiltered when no --filter is
    given), walking up the filter subset table until a cached file is
    found.  Aborts when no usable cache exists for the requested filter.
    """
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # on-disk branchmap caches are named 'branch2[-<filtername>]'
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap() # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions expose the reader as branchmap.read
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        fromfile(repo)
    timer(bench, setup=setup)
    fm.end()
2738 2740
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    svfs = getsvfs(repo)
    timer, fm = gettimer(ui)

    def countmarkers():
        # parsing happens in the obsstore constructor; the length is the
        # reported result value
        return len(obsolete.obsstore(svfs))

    timer(countmarkers)
    fm.end()
2748 2750
@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
     (b'', b'mincost', 0, b'smallest cost of items in cache'),
     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
     (b'', b'size', 4, b'size of cache'),
     (b'', b'gets', 10000, b'number of key lookups'),
     (b'', b'sets', 10000, b'number of key sets'),
     (b'', b'mixed', 10000, b'number of mixed mode operations'),
     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    """benchmark util.lrucachedict under several access patterns

    Runs init, get, insert, set and mixed get/set workloads; when
    --costlimit is non-zero, the cost-aware variants are run instead.
    """
    opts = _byteskwargs(opts)

    # construction cost: build 10000 empty caches of the requested size
    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    # `size` distinct random keys used by the get workloads
    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    # NOTE: this closure reads `costs`, which is only assigned further
    # down (late binding) — it is populated before any benchmark runs.
    # NOTE(review): `costs` has `sets` entries while `values` has `size`
    # entries; a --size larger than --sets would presumably raise
    # IndexError here — confirm before relying on such settings.
    def dogetscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                # entries may have been evicted by the cost limit
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        # op 0 = get, op 1 = set, chosen with --mixedgetfreq probability
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # cost-aware and plain workloads are mutually exclusive
    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2879 2881
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def bench():
        # 100000 identical writes; the ui layer is what we are measuring
        for _ in range(100000):
            ui.write(b'Testing write performance\n')

    timer(bench)
    fm.end()
2892 2894
def uisetup(ui):
    # for "historical portability":
    # cmdutil.openrevlog() without commands.debugrevlogopts identifies
    # Mercurial 1.9 (or a79fea6b3e77) through 3.7 (or 5606f7d0d063).
    # On those versions, '--dir' must fail explicitly because openrevlog
    # dir support has only been available since 3.5 (or 49c583ca48c4).
    hasopenrevlog = util.safehasattr(cmdutil, b'openrevlog')
    hasdebugopts = util.safehasattr(commands, b'debugrevlogopts')
    if not hasopenrevlog or hasdebugopts:
        return

    def openrevlog(orig, repo, cmd, file_, opts):
        if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
            raise error.Abort(b"This version doesn't support --dir option",
                              hint=b"use 3.5 or later")
        return orig(repo, cmd, file_, opts)

    extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2907 2909
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def bench():
        # drive a progress bar from 0 to `total`, one increment per step
        with ui.makeprogress(topic, total=total) as progress:
            for _ in pycompat.xrange(total):
                progress.increment()

    timer(bench)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now