##// END OF EJS Templates
perf: add a --from flag to perfmergecalculate...
marmoute -
r42573:e3ee707d default
parent child Browse files
Show More
@@ -1,2904 +1,2912
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
12 12 worst, median, and average. If not set only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
16 16 number of seconds to wait before any group of runs (default: 1)
17 17
18 18 ``pre-run``
19 19 number of runs to perform before starting measurement.
20 20
21 21 ``profile-benchmark``
22 22 Enable profiling for the benchmarked section.
23 23 (The first iteration is benchmarked)
24 24
25 25 ``run-limits``
26 26 Control the number of runs each benchmark will perform. The option value
27 27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 28 conditions are considered in order with the following logic:
29 29
30 30 If benchmark has been running for <time> seconds, and we have performed
31 31 <numberofrun> iterations, stop the benchmark,
32 32
33 33 The default value is: `3.0-100, 10.0-3`
34 34
35 35 ``stub``
36 36 When set, benchmarks will only be run once, useful for testing
37 37 (default: off)
38 38 '''
39 39
40 40 # "historical portability" policy of perf.py:
41 41 #
42 42 # We have to do:
43 43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 46 # - make historical perf command work correctly with as wide Mercurial
47 47 # version as possible
48 48 #
49 49 # We have to do, if possible with reasonable cost:
50 50 # - make recent perf command for historical feature work correctly
51 51 # with early Mercurial
52 52 #
53 53 # We don't have to do:
54 54 # - make perf command for recent feature work correctly with early
55 55 # Mercurial
56 56
57 57 from __future__ import absolute_import
58 58 import contextlib
59 59 import functools
60 60 import gc
61 61 import os
62 62 import random
63 63 import shutil
64 64 import struct
65 65 import sys
66 66 import tempfile
67 67 import threading
68 68 import time
69 69 from mercurial import (
70 70 changegroup,
71 71 cmdutil,
72 72 commands,
73 73 copies,
74 74 error,
75 75 extensions,
76 76 hg,
77 77 mdiff,
78 78 merge,
79 79 revlog,
80 80 util,
81 81 )
82 82
83 83 # for "historical portability":
84 84 # try to import modules separately (in dict order), and ignore
85 85 # failure, because these aren't available with early Mercurial
86 86 try:
87 87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 88 except ImportError:
89 89 pass
90 90 try:
91 91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 92 except ImportError:
93 93 pass
94 94 try:
95 95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 96 dir(registrar) # forcibly load it
97 97 except ImportError:
98 98 registrar = None
99 99 try:
100 100 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 101 except ImportError:
102 102 pass
103 103 try:
104 104 from mercurial.utils import repoviewutil # since 5.0
105 105 except ImportError:
106 106 repoviewutil = None
107 107 try:
108 108 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 109 except ImportError:
110 110 pass
111 111 try:
112 112 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 113 except ImportError:
114 114 pass
115 115
116 116 try:
117 117 from mercurial import profiling
118 118 except ImportError:
119 119 profiling = None
120 120
def identity(a):
    """Return *a* unchanged; fallback for missing pycompat helpers."""
    return a
123 123
124 124 try:
125 125 from mercurial import pycompat
126 126 getargspec = pycompat.getargspec # added to module after 4.5
127 127 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
128 128 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
129 129 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
130 130 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
131 131 if pycompat.ispy3:
132 132 _maxint = sys.maxsize # per py3 docs for replacing maxint
133 133 else:
134 134 _maxint = sys.maxint
135 135 except (ImportError, AttributeError):
136 136 import inspect
137 137 getargspec = inspect.getargspec
138 138 _byteskwargs = identity
139 139 fsencode = identity # no py3 support
140 140 _maxint = sys.maxint # no py3 support
141 141 _sysstr = lambda x: x # no py3 support
142 142 _xrange = xrange
143 143
144 144 try:
145 145 # 4.7+
146 146 queue = pycompat.queue.Queue
147 147 except (AttributeError, ImportError):
148 148 # <4.7.
149 149 try:
150 150 queue = pycompat.queue
151 151 except (AttributeError, ImportError):
152 152 queue = util.queue
153 153
154 154 try:
155 155 from mercurial import logcmdutil
156 156 makelogtemplater = logcmdutil.maketemplater
157 157 except (AttributeError, ImportError):
158 158 try:
159 159 makelogtemplater = cmdutil.makelogtemplater
160 160 except (AttributeError, ImportError):
161 161 makelogtemplater = None
162 162
163 163 # for "historical portability":
164 164 # define util.safehasattr forcibly, because util.safehasattr has been
165 165 # available since 1.9.3 (or 94b200a11cf7)
# private sentinel so that an attribute whose value is None still counts
_undefined = object()
def safehasattr(thing, attr):
    """Portable hasattr(); *attr* may be bytes (converted via _sysstr)."""
    probe = getattr(thing, _sysstr(attr), _undefined)
    return probe is not _undefined
setattr(util, 'safehasattr', safehasattr)
170 170
171 171 # for "historical portability":
172 172 # define util.timer forcibly, because util.timer has been available
173 173 # since ae5d60bb70c9
174 174 if safehasattr(time, 'perf_counter'):
175 175 util.timer = time.perf_counter
176 176 elif os.name == b'nt':
177 177 util.timer = time.clock
178 178 else:
179 179 util.timer = time.time
180 180
181 181 # for "historical portability":
182 182 # use locally defined empty option list, if formatteropts isn't
183 183 # available, because commands.formatteropts has been available since
184 184 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
185 185 # available since 2.2 (or ae5f92e154d3)
186 186 formatteropts = getattr(cmdutil, "formatteropts",
187 187 getattr(commands, "formatteropts", []))
188 188
189 189 # for "historical portability":
190 190 # use locally defined option list, if debugrevlogopts isn't available,
191 191 # because commands.debugrevlogopts has been available since 3.7 (or
192 192 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
193 193 # since 1.9 (or a79fea6b3e77).
194 194 revlogopts = getattr(cmdutil, "debugrevlogopts",
195 195 getattr(commands, "debugrevlogopts", [
196 196 (b'c', b'changelog', False, (b'open changelog')),
197 197 (b'm', b'manifest', False, (b'open manifest')),
198 198 (b'', b'dir', False, (b'open directory manifest')),
199 199 ]))
200 200
201 201 cmdtable = {}
202 202
203 203 # for "historical portability":
204 204 # define parsealiases locally, because cmdutil.parsealiases has been
205 205 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a command spec b"name|alias1|..." into its list of names."""
    return cmd.split(b"|")
208 208
209 209 if safehasattr(registrar, 'command'):
210 210 command = registrar.command(cmdtable)
211 211 elif safehasattr(cmdutil, 'command'):
212 212 command = cmdutil.command(cmdtable)
213 213 if b'norepo' not in getargspec(command).args:
214 214 # for "historical portability":
215 215 # wrap original cmdutil.command, because "norepo" option has
216 216 # been available since 3.1 (or 75a96326cecb)
217 217 _command = command
218 218 def command(name, options=(), synopsis=None, norepo=False):
219 219 if norepo:
220 220 commands.norepo += b' %s' % b' '.join(parsealiases(name))
221 221 return _command(name, list(options), synopsis)
222 222 else:
223 223 # for "historical portability":
224 224 # define "@command" annotation locally, because cmdutil.command
225 225 # has been available since 1.9 (or 2daa5179e73f)
226 226 def command(name, options=(), synopsis=None, norepo=False):
227 227 def decorator(func):
228 228 if synopsis:
229 229 cmdtable[name] = func, list(options), synopsis
230 230 else:
231 231 cmdtable[name] = func, list(options)
232 232 if norepo:
233 233 commands.norepo += b' %s' % b' '.join(parsealiases(name))
234 234 return func
235 235 return decorator
236 236
237 237 try:
238 238 import mercurial.registrar
239 239 import mercurial.configitems
240 240 configtable = {}
241 241 configitem = mercurial.registrar.configitem(configtable)
242 242 configitem(b'perf', b'presleep',
243 243 default=mercurial.configitems.dynamicdefault,
244 244 )
245 245 configitem(b'perf', b'stub',
246 246 default=mercurial.configitems.dynamicdefault,
247 247 )
248 248 configitem(b'perf', b'parentscount',
249 249 default=mercurial.configitems.dynamicdefault,
250 250 )
251 251 configitem(b'perf', b'all-timing',
252 252 default=mercurial.configitems.dynamicdefault,
253 253 )
254 254 configitem(b'perf', b'pre-run',
255 255 default=mercurial.configitems.dynamicdefault,
256 256 )
257 257 configitem(b'perf', b'profile-benchmark',
258 258 default=mercurial.configitems.dynamicdefault,
259 259 )
260 260 configitem(b'perf', b'run-limits',
261 261 default=mercurial.configitems.dynamicdefault,
262 262 )
263 263 except (ImportError, AttributeError):
264 264 pass
265 265
def getlen(ui):
    """Return len(), or a constant-1 function when perf.stub is set."""
    if not ui.configbool(b"perf", b"stub", False):
        return len
    return lambda x: 1
270 270
class noop(object):
    """A context manager that does nothing (used when profiling is off)."""

    def __enter__(self):
        return None

    def __exit__(self, *exc_info):
        return None

# shared do-nothing instance, reused wherever a profiler slot must be filled
NOOPCTX = noop()
279 279
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # parse '<seconds>-<runcount>' pairs; ill-formed entries are warned
    # about and skipped rather than aborting the benchmark
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n'
                     % item))
            continue
        try:
            time_limit = float(pycompat.sysstr(parts[0]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (pycompat.bytestr(e), item)))
            continue
        try:
            run_limit = int(pycompat.sysstr(parts[1]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (pycompat.bytestr(e), item)))
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # optional profiler for the first benchmarked iteration; only when the
    # profiling module imported successfully and the config asks for it
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
                          prerun=prerun, profiler=profiler)
    return t, fm
379 379
def stub_timer(fm, func, setup=None, title=None):
    """Execute *func* a single time, with optional *setup* first.

    Used instead of _timer when perf.stub is set; *fm* and *title* are
    accepted for signature compatibility but ignored.
    """
    if setup is not None:
        setup()
    func()
384 384
@contextlib.contextmanager
def timeone():
    """Time the enclosed with-block.

    Yields a one-element list; on exit, a (wall, user, sys) elapsed-time
    tuple is appended to it.
    """
    results = []
    times_before = os.times()
    wall_before = util.timer()
    yield results
    wall_after = util.timer()
    times_after = os.times()
    results.append((wall_after - wall_before,
                    times_after[0] - times_before[0],
                    times_after[1] - times_before[1]))
395 395
396 396
397 397 # list of stop condition (elapsed time, minimal run count)
398 398 DEFAULTLIMITS = (
399 399 (3.0, 100),
400 400 (10.0, 3),
401 401 )
402 402
def _timer(fm, func, setup=None, title=None, displayall=False,
           limits=DEFAULTLIMITS, prerun=0, profiler=None):
    """Benchmark *func* repeatedly, then emit timings through formatter *fm*.

    *setup* (if given) runs before every timed call.  *prerun* untimed
    warm-up calls are made first.  *limits* is a sequence of
    (elapsed-seconds, min-run-count) stop conditions; the loop stops as
    soon as any one of them is satisfied.  Only the first timed iteration
    runs under *profiler* (if given).
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # warm-up runs: executed but not measured
    for i in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # only the very first iteration is profiled
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    # report all collected timings (the last call's result is shown too)
    formatone(fm, results, title=title, result=r,
              displayall=displayall)
435 435
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timings through formatter *fm*.

    *timings* is a list of (wall, user, sys) tuples (it is sorted in
    place).  The best run is always shown; with *displayall*, max, avg
    and median rows are added as well.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)
    def display(role, entry):
        # rows other than 'best' prefix their field names with the role
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')
    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
467 467
468 468 # utilities for historical portability
469 469
def getint(ui, section, name, default):
    """Read config *section*.*name* as an integer, or *default* if unset.

    for "historical portability": ui.configint has only been available
    since 1.9 (or fa2b596db182), so parse the raw value ourselves.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, raw))
481 481
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    # snapshot the current value so restore() can roll it back later
    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        # set() installs a new value; restore() reinstates the snapshot
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
511 511
512 512 # utilities to examine each internal API changes
513 513
def getbranchmapsubsettable():
    """Locate the 'subsettable' mapping wherever this hg version keeps it.

    for "historical portability", it may live in branchmap (since 2.9 or
    175c6fd8cacc), repoview (since 2.5 or 59a9f18d4587) or repoviewutil
    (since 5.0).
    """
    for mod in (branchmap, repoview, repoviewutil):
        table = getattr(mod, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
530 530
def getsvfs(repo):
    """Return the object used to access files under .hg/store.

    repo.svfs appeared in 2.3 (or 7034365089bf); fall back to the older
    repo.sopener attribute on earlier versions.
    """
    return getattr(repo, 'svfs', None) or getattr(repo, 'sopener')
541 541
def getvfs(repo):
    """Return the object used to access files under .hg.

    repo.vfs appeared in 2.3 (or 7034365089bf); fall back to the older
    repo.opener attribute on earlier versions.
    """
    return getattr(repo, 'vfs', None) or getattr(repo, 'opener')
552 552
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API

    The fallbacks are tried from newest to oldest tags-cache API.
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
581 581
582 582 # utilities to clear cache
583 583
def clearfilecache(obj, attrname):
    """Drop *attrname* from *obj*'s file cache so it gets recomputed."""
    getunfiltered = getattr(obj, 'unfiltered', None)
    if getunfiltered is not None:
        # filecached properties live on the unfiltered repo
        obj = getunfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
591 591
def clearchangelog(repo):
    """Invalidate any cached changelog on *repo*, filtered or not."""
    unfi = repo.unfiltered()
    if repo is not unfi:
        # filtered repos keep their own changelog cache slots
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(unfi, 'changelog')
597 597
598 598 # perf commands
599 599
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the dirstate with the given patterns"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})
    def run():
        entries = repo.dirstate.walk(matcher, subrepos=[], unknown=True,
                                     ignored=False)
        return len(list(entries))
    timer(run)
    fm.end()
608 608
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating one file at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]
    timer(lambda: len(fctx.annotate(True)))
    fm.end()
616 616
@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    """benchmark computing the working copy status"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def run():
        st = repo.status(unknown=opts[b'unknown'])
        return sum(map(len, st))
    timer(run)
    fm.end()
628 628
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the working copy"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # read the old value *before* entering the try block: if this were
    # assigned inside the try and the read failed, the finally clause
    # would raise a NameError that masks the real exception
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            # the uipathfn argument was added to scmutil.addremove in 5.0
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
646 646
def clearcaches(cl):
    """Reset a changelog's lookup caches so each run starts cold."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        # modern API: the revlog knows how to reset its own caches
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # older API: rebuild the node cache to its pristine state by hand
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
655 655
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def setup():
        # start from cold caches on every run
        clearcaches(cl)
    def run():
        len(cl.headrevs())
    timer(run, setup=setup)
    fm.end()
668 668
@command(b'perftags', formatteropts +
         [
             (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perftags(ui, repo, **opts):
    """benchmark computing the repository's tags"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()
    def run():
        return len(repo.tags())
    timer(run, setup=setup)
    fm.end()
687 687
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark walking all ancestors of the changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def run():
        for rev in repo.changelog.ancestors(heads):
            pass
    timer(run)
    fm.end()
698 698
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests against a lazy ancestor set"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def run():
        ancs = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in ancs
    timer(run)
    fm.end()
711 711
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # every other perf command byteskwargs its opts; without this the
    # str-keyed opts dict leaks into gettimer()/hg.peer() under Python 3
    opts = _byteskwargs(opts)
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # open a fresh peer for every run so connection state is comparable
        repos[1] = hg.peer(ui, opts, path)
    def d():
        setdiscovery.findcommonheads(ui, *repos)
    timer(d, setup=s)
    fm.end()
726 726
@command(b'perfbookmarks', formatteropts +
         [
             (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')
    def run():
        # attribute access alone forces the bookmarks to be (re)parsed
        repo._bookmarks
    timer(run, setup=setup)
    fm.end()
745 745
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        # time fn() applied to a freshly opened bundle
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # time reading the whole bundle in *size*-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # baseline: raw file reads with no bundle layer at all
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        # time reading every part's payload in *size*-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # sniff the bundle type once to pick the format-specific benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
863 863
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # exhaust the chunk generator; only the elapsed time matters
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
894 894
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark building the dirstate's directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate so parsing is not part of the measurement
    b'a' in dirstate
    def run():
        dirstate.hasdir(b'a')
        del dirstate._map._dirs
    timer(run)
    fm.end()
906 906
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark a full dirstate reload"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate so the first timed run starts from the same state
    b"a" in repo.dirstate
    def run():
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(run)
    fm.end()
917 917
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark hasdir() with the directory map dropped each run"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate so parsing is not part of the measurement
    b"a" in repo.dirstate
    def run():
        repo.dirstate.hasdir(b"a")
        del repo.dirstate._map._dirs
    timer(run)
    fm.end()
928 928
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark building the dirstate file fold-map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # prime the dirstate before timing
    b'a' in ds
    def run():
        ds._map.filefoldmap.get(b'a')
        # invalidate the cached fold-map between runs
        del ds._map.filefoldmap
    timer(run)
    fm.end()
940 940
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark building the dirstate directory fold-map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # prime the dirstate before timing
    b'a' in ds
    def run():
        ds._map.dirfoldmap.get(b'a')
        # drop both cached maps so each run rebuilds them
        del ds._map.dirfoldmap
        del ds._map._dirs
    timer(run)
    fm.end()
953 953
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark writing the dirstate back to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate before timing
    b"a" in dirstate
    def run():
        # mark it dirty so write() actually serializes something
        dirstate._dirty = True
        dirstate.write(repo.currenttransaction())
    timer(run)
    fm.end()
965 965
@command(b'perfmergecalculate',
         [
             (b'r', b'rev', b'.', b'rev to merge against'),
             (b'', b'from', b'', b'rev to merge from'),
         ] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    """benchmark the merge action computation (no merge is performed)

    By default the working copy is merged against --rev. With --from, the
    merge is computed between two committed revisions instead, avoiding any
    working-directory involvement.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # after _byteskwargs all option keys are bytes, so index with b'from'
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()
984 992
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic between two revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    src = scmutil.revsingle(repo, rev1, rev1)
    dst = scmutil.revsingle(repo, rev2, rev2)
    def run():
        copies.pathcopies(src, dst)
    timer(run)
    fm.end()
996 1004
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
         ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def run():
        cache = _phases
        if full:
            # with --full, also pay the cost of re-reading phase data
            clearfilecache(repo, b'_phasecache')
            cache = repo._phasecache
        cache.invalidate()
        cache.loadphaserevs(repo)
    timer(run)
    fm.end()
1015 1023
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    # local imports keep the module loadable on Mercurial versions that
    # lack these helpers
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # resolve DEST like `hg push` would (default-push, then default)
    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    # fetch the remote phase roots once, outside the timed section
    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    # count remote phase roots that exist locally with a non-public phase
    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
1071 1079
@command(b'perfmanifest',[
    (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
    (b'', b'clear-disk', False, b'clear on-disk caches too'),
] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset: benchmark that changeset's manifest
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full 40-char hex node of a manifest revision
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # getstorage() is the modern API; older Mercurial exposes
                # the manifest revlog directly
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        # clear_disk extends the cache purge to persisted caches too
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
1107 1115
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changelog entry"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()
    def run():
        # note: the changelog cache is not cleared between runs
        repo.changelog.read(node)
    timer(run)
    fm.end()
1118 1126
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def prepare():
        # throw away any cached ignore matcher before each run
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def load():
        dirstate._ignore

    timer(load, setup=prepare, title=b"load")
    fm.end()
1135 1143
@command(b'perfindex', [
    (b'', b'rev', [], b'revision to be looked up (default tip)'),
    (b'', b'no-lookup', None, b'do not revision lookup post creation'),
] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # _byteskwargs converts option keys to bytes: index with b'rev'
        # (a str key raises KeyError on Python 3); message is bytes for
        # consistency with the rest of the file
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)
    timer(d, setup=setup)
    fm.end()
1189 1197
@command(b'perfnodemap', [
    (b'', b'rev', [], b'revision to be looked up (default tip)'),
    (b'', b'clear-caches', True, b'clear revlog cache between calls'),
] + formatteropts)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # _byteskwargs converts option keys to bytes: the previous str key
    # ('clear_caches') raised KeyError on Python 3
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]
    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        def setup():
            setnodeget()
    else:
        setnodeget()
        d() # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1248 1256
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the time needed to spawn `hg version`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def run():
        if os.name == r'nt':
            # no /dev/null on Windows; set HGRCPATH via the environment
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
        else:
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
    timer(run)
    fm.end()
1262 1270
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodes = [repo.changelog.node(i) for i in _xrange(count)]
    def run():
        cl = repo.changelog
        for node in nodes:
            cl.parents(node)
    timer(run)
    fm.end()
1286 1294
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing one changeset's file list"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    def run():
        len(repo[rev].files())
    timer(run)
    fm.end()
1296 1304
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading a changeset's raw file list from the changelog"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def run():
        # index 3 of the parsed changelog entry is the list of touched files
        len(cl.read(rev)[3])
    timer(run)
    fm.end()
1307 1315
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision identifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def run():
        return len(repo.lookup(rev))
    timer(run)
    fm.end()
1314 1322
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
          ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark applying a stream of random edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: every invocation measures the exact same edit sequence
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def run():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(run)
    fm.end()
1348 1356
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def run():
        return len(scmutil.revrange(repo, specs))
    timer(run)
    fm.end()
1356 1364
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark looking a node up in a freshly-parsed changelog index"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    node = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def run():
        cl.rev(node)
        # wipe the lookup caches so every iteration starts cold
        clearcaches(cl)
    timer(run)
    fm.end()
1370 1378
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark a plain `hg log` invocation"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # swallow log output so terminal printing does not pollute the timing
    ui.pushbuffer()
    def run():
        commands.log(ui, repo, rev=rev, date=b'', user=b'',
                     copies=opts.get(b'rename'))
    timer(run)
    ui.popbuffer()
    fm.end()
1384 1392
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def walkback():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            # branch() forces the changelog entry itself to be read,
            # not just the index
            repo[rev].branch()
    timer(walkback)
    fm.end()
1399 1407
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    # makelogtemplater is resolved at import time; None means this
    # Mercurial is too old to provide it
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # route all template output to /dev/null so rendering dominates
    # the measured time
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1433 1441
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
         ])
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    # the --timing variant adds two extra columns to the output
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # only merge revisions are interesting for copy tracing
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        # one (base, parent) pair per parent per common-ancestor head
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
1509 1517
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def run():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)
    timer(run)
    fm.end()
1516 1524
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    def run():
        store.fncache._load()
    timer(run)
    fm.end()
1526 1534
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache inside a throwaway transaction"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    # fncache writes require the repo lock and a transaction; keep both
    # open across all timed runs and clean up afterwards
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')
    def d():
        # force a rewrite even though nothing actually changed
        s.fncache._dirty = True
        s.fncache.write(tr)
    timer(d)
    tr.close()
    lock.release()
    fm.end()
1543 1551
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    # load once, outside the timed section
    store.fncache._load()
    def run():
        for path in store.fncache.entries:
            store.encode(path)
    timer(run)
    fm.end()
1555 1563
def _bdiffworker(q, blocks, xdiff, ready, done):
    # Worker loop for threaded perfbdiff runs: pull text pairs off the
    # queue and diff them until told to stop.
    #
    # q: queue of (text1, text2) pairs; None is a per-round end sentinel
    # blocks/xdiff: select which diff primitive to exercise
    # ready: condition the workers park on between timed rounds
    # done: event set by perfbdiff when the whole benchmark is over
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            # wait for the driver to wake us for the next round (or shutdown)
            ready.wait()
1571 1579
def _manifestrevision(repo, mnode):
    """Return the raw stored revision for manifest node ``mnode``.

    Uses the modern ``getstorage('')`` API when available and falls back to
    the legacy ``_revlog`` attribute on older Mercurial versions.
    """
    ml = repo.manifestlog
    if util.safehasattr(ml, b'getstorage'):
        return ml.getstorage(b'').revision(mnode)
    return ml._revlog.revision(mnode)
1581 1589
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # -c/-m take no FILE argument: the first positional is the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # collect all (old, new) text pairs up front, outside the timed section
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        q = queue()
        # one initial sentinel per worker so they all park on `ready`
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            # feed the work, then one end-of-round sentinel per worker,
            # wake everyone up and wait for the round to drain
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # tell the workers the benchmark is over and release them
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1682 1690
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # -c/-m take no FILE argument: the first positional is the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # collect all (old, new) text pairs up front, outside the timed section
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1748 1756
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # map each single-letter diff flag to its commands.diff keyword
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    for flags in ('', 'w', 'b', 'B', 'wB'):
        diffargs = {options[flag]: b'1' for flag in flags}
        def run():
            ui.pushbuffer()
            commands.diff(ui, repo, **diffargs)
            ui.popbuffer()
        label = flags.encode('ascii')
        title = b'diffopts: %s' % ((b'-' + label) if label else b'none')
        timer(run, title=title)
    fm.end()
1770 1778
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    indexdata = opener.read(indexfile)

    # The first 4 bytes hold flags (high 16 bits) and the revlog version
    # (low 16 bits); only version 1 revlogs are supported here.
    header = struct.unpack(b'>I', indexdata[0:4])[0]
    version = header & 0xFFFF
    if version != 1:
        raise error.Abort((b'unsupported revlog version: %d') % version)
    revlogio = revlog.revlogio()
    inline = header & (1 << 16)

    numrevs = len(rl)

    # sample nodes at interesting positions in the index
    node0 = rl.node(0)
    node25 = rl.node(numrevs // 4)
    node50 = rl.node(numrevs // 2)
    node75 = rl.node(numrevs // 4 * 3)
    node100 = rl.node(numrevs - 1)

    allrevs = range(numrevs)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(numrevs)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        # measures instantiation cost only
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(indexdata, inline)

    def getentry(revornode):
        index = revlogio.parseindex(indexdata, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(indexdata, inline)[0]

        for _pass in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(indexdata, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(indexdata, inline)[1]
        if nodemap is None:
            return

        for _pass in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1888 1896
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start counts from the end, like sequence indexing
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        first = startrev
        stop = rllen
        step = opts[b'dist']

        if reverse:
            # walk from tip down to (one before) the start revision
            first, stop = stop - 1, first - 1
            step = -1 * step

        for cur in _xrange(first, stop, step):
            # Old revisions don't support passing int.
            n = rl.node(cur)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1930 1938
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          # BUG FIX: help text previously said 'last revision to write',
          # copy-pasted from --stoprev; --count is the number of passes.
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
         ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # BUG FIX: error message previously read 'invalide run count'
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    # each entry becomes (rev, [timing-from-run-1, timing-from-run-2, ...])
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # BUG FIX: this entry previously computed `resultcount * 70 // 100`,
        # reporting the 70th percentile under the "50%" label.
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
2038 2046
2039 2047 class _faketr(object):
2040 2048 def add(s, x, y, z=None):
2041 2049 return None
2042 2050
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    """Re-add revisions [startrev, stoprev] of `orig` into a throwaway copy.

    Returns a list of (rev, timing) pairs, one per re-added revision.
    """
    results = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            def updateprogress(pos):
                progress.update(pos)
            def completeprogress():
                progress.complete()
        else:
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for pos, rev in enumerate(revs):
            updateprogress(pos)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            # only the addrawrevision call itself is timed
            with timeone() as elapsed:
                dest.addrawrevision(*addargs, **addkwargs)
            results.append((rev, elapsed[0]))
        updateprogress(total)
        completeprogress()
    return results
2079 2087
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) for addrawrevision() recreating `rev`.

    `source` selects how the revision content is fed: as a full text, as a
    delta against a parent, or as the delta already stored in `orig`.
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to p1 when there is no second parent
        parent = p1 if p2 == nullid else p2
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        p1diff = orig.revdiff(p1, rev)
        parent, diff = p1, p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            # keep p1's delta on ties, like the original comparison
            if len(p1diff) > len(p2diff):
                parent, diff = p2, p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2118 2126
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a writable temporary copy of `orig` truncated at `truncaterev`.

    Revisions >= `truncaterev` are removed from the copy so they can be
    re-added by the benchmark; the temporary directory is deleted on exit.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as idxfh:
            idxfh.seek(0)
            # each index entry has a fixed size, so this drops entries
            # for revisions >= truncaterev
            idxfh.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as datafh:
            datafh.seek(0)
            datafh.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
2165 2173
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # default to every engine that is available and actually able
        # to compress
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # an inline revlog stores data in its index file
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2283 2291
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the sole positional argument is the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # hoist attribute lookups out of the loop
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        addchunk = chunks.append
        for segidx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[segidx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # skip past the interleaved index entries
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                addchunk(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2419 2427
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if need to evaluate the impact of build volatile
    revisions set cache on the revset execution. Volatile cache hold filtered
    and obsolete related cache."""
    # BUG FIX: the docstring previously referred to a nonexistent --clean
    # option; the declared option is -C/--clear.
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            # force changectx creation for every matched revision
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass
    timer(d)
    fm.end()
2442 2450
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
         ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def getobs(name):
        # benchmark one obsolescence-related set computation
        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)
        return run

    obsnames = sorted(obsolete.cachefuncs)
    if names:
        obsnames = [n for n in obsnames if n in names]

    for name in obsnames:
        timer(getobs(name), title=name)

    def getfiltered(name):
        # benchmark one repoview filter computation
        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)
        return run

    filternames = sorted(repoview.filtertable)
    if names:
        filternames = [n for n in filternames if n in names]

    for name in filternames:
        timer(getfiltered(name), title=name)
    fm.end()
2484 2492
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
         ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        view = repo if filtername is None else repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                view._branchcaches.clear()
            else:
                filtered.pop(filtername, None)
            view.branchmap()
        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset is not itself still pending
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable persistent branchmap reads and writes for the duration
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = b'unfiltered' if name is None else name
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2563 2571
@command(b'perfbranchmapupdate', [
     (b'', b'base', [], b'subset of revision to start from'),
     (b'', b'target', [], b'subset of revision to end with'),
     (b'', b'clear-caches', False, b'clear cache between each runs')
    ] + formatteropts)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    # BUG FIX: the second docstring example's comment line started with `$`
    # instead of `#`, making it read as a shell command.
    from mercurial import branchmap
    from mercurial import repoview
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # register temporary filters so we can build repoviews matching
        # exactly the base and target revision sets
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset was found, build the base branchmap from
            # scratch (original comment read "where found")
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2667 2675
@command(b'perfbranchmapload', [
    (b'f', b'filter', b'', b'Specify repoview filter'),
    # fix: help string previously read "brachmap"
    (b'', b'list', False, b'List branchmap filter caches'),
    (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),

    ] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # --list mode: just enumerate the on-disk branchmap caches
        # (files named branch2[-<filter>]) with their sizes, no timing.
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap() # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions expose the reader as branchmap.read
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached; walk up the
    # filter subset chain until a repoview with a cached branchmap is found
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        fromfile(repo)
    timer(bench, setup=setup)
    fm.end()
2718 2726
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)
    def countmarkers():
        # constructing the obsstore parses every marker on disk
        return len(obsolete.obsstore(svfs))
    timer(countmarkers)
    fm.end()
2728 2736
# Benchmark util.lrucachedict.  Four workloads are timed: construction
# (init), pure lookups (gets), insertions with eviction (inserts/sets)
# and a randomized mix of both.  When --costlimit is non-zero the
# cost-aware variants (d.insert(..., cost=...) against a maxcost bound)
# are benchmarked instead of the plain ones.
@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
    (b'', b'mincost', 0, b'smallest cost of items in cache'),
    (b'', b'maxcost', 100, b'maximum cost of items in cache'),
    (b'', b'size', 4, b'size of cache'),
    (b'', b'gets', 10000, b'number of key lookups'),
    (b'', b'sets', 10000, b'number of key sets'),
    (b'', b'mixed', 10000, b'number of mixed mode operations'),
    (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    opts = _byteskwargs(opts)

    def doinit():
        # raw cost of constructing the cache object itself
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    # `size` random keys; the cache is exactly filled, so lookups below
    # never trigger eviction
    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        # NOTE: reads `costs`, which is only assigned further down in
        # this function -- fine at runtime, since no benchmark closure
        # is invoked before the `benches` loop at the bottom
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                # the cost limit may have evicted this key; that is
                # expected, not an error
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    # op == 0 is a get, op == 1 is a set; --mixedgetfreq controls the
    # percentage of gets.  Keys span [0, size * 2] so roughly half the
    # lookups miss.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # pick the cost-aware or the plain workload set, never both
    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    # each workload gets its own timer/formatter pair so results are
    # reported as separate benchmark entries
    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2859 2867
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    message = b'Testing write performance\n'
    def benchwrite():
        # a large fixed number of writes so per-call overhead dominates
        for _ in range(100000):
            ui.write(message)
    timer(benchwrite)
    fm.end()
2872 2880
def uisetup(ui):
    # Extension setup hook: patch cmdutil.openrevlog on old Mercurial
    # versions so perf commands fail with a clear message when --dir is
    # unsupported.
    if (util.safehasattr(cmdutil, b'openrevlog') and
        not util.safehasattr(commands, b'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            # repo.dirlog only exists where --dir is supported
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(b"This version doesn't support --dir option",
                                  hint=b"use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2887 2895
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
    ], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def benchprogress():
        # drive one progress bar through every increment up to --total
        with ui.makeprogress(topic, total=total) as progress:
            for _ in pycompat.xrange(total):
                progress.increment()

    timer(benchprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now