##// END OF EJS Templates
perf: make sure to explicitly disable any profiler after the first iteration...
marmoute -
r42556:a09829e1 default
parent child Browse files
Show More
@@ -1,2901 +1,2904 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
12 12 worst, median average. If not set only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
16 16 number of second to wait before any group of runs (default: 1)
17 17
18 18 ``pre-run``
19 19 number of run to perform before starting measurement.
20 20
21 21 ``profile-benchmark``
22 22 Enable profiling for the benchmarked section.
23 23   (The first iteration is profiled)
24 24
25 25 ``run-limits``
26 26 Control the number of runs each benchmark will perform. The option value
27 27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 28 conditions are considered in order with the following logic:
29 29
30 30 If benchmark has been running for <time> seconds, and we have performed
31 31 <numberofrun> iterations, stop the benchmark,
32 32
33 33 The default value is: `3.0-100, 10.0-3`
34 34
35 35 ``stub``
36 36 When set, benchmarks will only be run once, useful for testing
37 37 (default: off)
38 38 '''
39 39
40 40 # "historical portability" policy of perf.py:
41 41 #
42 42 # We have to do:
43 43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 46 # - make historical perf command work correctly with as wide Mercurial
47 47 # version as possible
48 48 #
49 49 # We have to do, if possible with reasonable cost:
50 50 # - make recent perf command for historical feature work correctly
51 51 # with early Mercurial
52 52 #
53 53 # We don't have to do:
54 54 # - make perf command for recent feature work correctly with early
55 55 # Mercurial
56 56
57 57 from __future__ import absolute_import
58 58 import contextlib
59 59 import functools
60 60 import gc
61 61 import os
62 62 import random
63 63 import shutil
64 64 import struct
65 65 import sys
66 66 import tempfile
67 67 import threading
68 68 import time
69 69 from mercurial import (
70 70 changegroup,
71 71 cmdutil,
72 72 commands,
73 73 copies,
74 74 error,
75 75 extensions,
76 76 hg,
77 77 mdiff,
78 78 merge,
79 79 revlog,
80 80 util,
81 81 )
82 82
83 83 # for "historical portability":
84 84 # try to import modules separately (in dict order), and ignore
85 85 # failure, because these aren't available with early Mercurial
86 86 try:
87 87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 88 except ImportError:
89 89 pass
90 90 try:
91 91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 92 except ImportError:
93 93 pass
94 94 try:
95 95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 96 dir(registrar) # forcibly load it
97 97 except ImportError:
98 98 registrar = None
99 99 try:
100 100 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 101 except ImportError:
102 102 pass
103 103 try:
104 104 from mercurial.utils import repoviewutil # since 5.0
105 105 except ImportError:
106 106 repoviewutil = None
107 107 try:
108 108 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 109 except ImportError:
110 110 pass
111 111 try:
112 112 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 113 except ImportError:
114 114 pass
115 115
116 116 try:
117 117 from mercurial import profiling
118 118 except ImportError:
119 119 profiling = None
120 120
def identity(a):
    """Return *a* unchanged (stand-in for missing pycompat helpers)."""
    return a
123 123
124 124 try:
125 125 from mercurial import pycompat
126 126 getargspec = pycompat.getargspec # added to module after 4.5
127 127 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
128 128 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
129 129 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
130 130 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
131 131 if pycompat.ispy3:
132 132 _maxint = sys.maxsize # per py3 docs for replacing maxint
133 133 else:
134 134 _maxint = sys.maxint
135 135 except (ImportError, AttributeError):
136 136 import inspect
137 137 getargspec = inspect.getargspec
138 138 _byteskwargs = identity
139 139 fsencode = identity # no py3 support
140 140 _maxint = sys.maxint # no py3 support
141 141 _sysstr = lambda x: x # no py3 support
142 142 _xrange = xrange
143 143
144 144 try:
145 145 # 4.7+
146 146 queue = pycompat.queue.Queue
147 147 except (AttributeError, ImportError):
148 148 # <4.7.
149 149 try:
150 150 queue = pycompat.queue
151 151 except (AttributeError, ImportError):
152 152 queue = util.queue
153 153
154 154 try:
155 155 from mercurial import logcmdutil
156 156 makelogtemplater = logcmdutil.maketemplater
157 157 except (AttributeError, ImportError):
158 158 try:
159 159 makelogtemplater = cmdutil.makelogtemplater
160 160 except (AttributeError, ImportError):
161 161 makelogtemplater = None
162 162
163 163 # for "historical portability":
164 164 # define util.safehasattr forcibly, because util.safehasattr has been
165 165 # available since 1.9.3 (or 94b200a11cf7)
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # unique sentinel: distinguishes "attr missing" from None
def safehasattr(thing, attr):
    """Return True if *thing* has attribute *attr* (attr given as bytes)."""
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)  # monkey-patch for old Mercurial
170 170
171 171 # for "historical portability":
172 172 # define util.timer forcibly, because util.timer has been available
173 173 # since ae5d60bb70c9
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    # best available monotonic, high-resolution clock (py3.3+)
    util.timer = time.perf_counter
elif os.name == b'nt':
    # NOTE(review): os.name is a str, so this b'nt' comparison only matches
    # on Python 2; on Python 3 the perf_counter branch above is taken first,
    # making this branch unreachable there — confirm intent
    util.timer = time.clock
else:
    # on POSIX py2, time.time has better resolution than time.clock
    util.timer = time.time

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
    (b'c', b'changelog', False, (b'open changelog')),
    (b'm', b'manifest', False, (b'open manifest')),
    (b'', b'dir', False, (b'open directory manifest')),
]))
200 200
cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a command spec b'name|alias1|...' into its list of names."""
    names = cmd.split(b"|")
    return names
208 208
# pick the most capable @command decorator this Mercurial provides;
# safehasattr also copes with registrar being None (import failed above)
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
236 236
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    # declare every perf.* option so devel-warnings stay quiet;
    # dynamicdefault because the real defaults are supplied at lookup time
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'pre-run',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'profile-benchmark',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'run-limits',
               default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    # for "historical portability": registrar/configitems may not exist
    pass
265 265
def getlen(ui):
    """Return a length function; a constant 1 when perf.stub is set."""
    if ui.configbool(b"perf", b"stub", False):
        return lambda seq: 1
    return len
270 270
class noop(object):
    """Context manager that does nothing on enter or exit."""
    def __enter__(self):
        return None
    def __exit__(self, *exc_info):
        return None

# shared do-nothing instance; used to turn a profiler off after one use
NOOPCTX = noop()
279
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry is b'<seconds>-<minruns>'; malformed entries are skipped
    # with a warning rather than aborting the benchmark
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n'
                     % item))
            continue
        try:
            time_limit = float(pycompat.sysstr(parts[0]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (pycompat.bytestr(e), item)))
            continue
        try:
            run_limit = int(pycompat.sysstr(parts[1]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (pycompat.bytestr(e), item)))
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # experimental config: perf.profile-benchmark
    # the profiler is handed to _timer, which attaches it to the first
    # measured iteration only
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
                          prerun=prerun, profiler=profiler)
    return t, fm
377 379
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* exactly once without measuring (perf.stub mode)."""
    if setup is not None:
        setup()
    func()
382 384
@contextlib.contextmanager
def timeone():
    """Time one code block.

    Yields an (initially empty) list; after the with-block exits, it holds
    a single (wallclock, user-cpu, sys-cpu) tuple for the enclosed code.
    """
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    # os.times(): index 0 is user time, index 1 is system time
    r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
393 395
394 396
# list of stop condition (elapsed time, minimal run count)
DEFAULTLIMITS = (
    (3.0, 100),  # after 3 seconds, stop once 100 runs were done
    (10.0, 3),   # after 10 seconds, stop as soon as 3 runs were done
)
400 402
def _timer(fm, func, setup=None, title=None, displayall=False,
           limits=DEFAULTLIMITS, prerun=0, profiler=None):
    """Repeatedly run *func* and report timings through formatter *fm*.

    *setup* (if given) runs before every iteration, unmeasured.  *prerun*
    warm-up iterations are executed before measurement starts.  *profiler*
    (a context manager, or None) is active for the first measured iteration
    only; it is explicitly replaced by NOOPCTX afterwards.  Iteration stops
    at the first (elapsed-time, min-run-count) pair in *limits* that is
    satisfied.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # fix: use the portability shim _xrange, not the bare py2-only builtin
    # xrange, which raises NameError under Python 3
    for i in _xrange(prerun):
        # warm-up runs: executed but never recorded
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # only profile the first iteration; disable the profiler afterwards
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)
432 435
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timing summary to formatter *fm*.

    *timings* is a list of (wall, user-cpu, sys-cpu) tuples.  Only the best
    run is shown unless *displayall* is set, which adds max/avg/median rows.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)
    def display(role, entry):
        # non-best rows get a role prefix on every field name
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')
    # tuples sort by wall time first, so timings[0] is the fastest run
    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
464 467
465 468 # utilities for historical portability
466 469
def getint(ui, section, name, default):
    """Read config value *section.name* as an int, or *default* if unset.

    for "historical portability": ui.configint has only been available
    since 1.9 (or fa2b596db182), hence this local reimplementation.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, raw))
478 481
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    # capture the current value now so restore() can put it back later
    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        # small handle exposing set()/restore() for the captured attribute
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
508 511
509 512 # utilities to examine each internal API changes
510 513
def getbranchmapsubsettable():
    """Return the repo-filter 'subsettable' mapping, wherever it lives."""
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
527 530
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store

    repo.svfs has been available since 2.3 (or 7034365089bf); older
    Mercurial exposes the same thing as 'sopener'.
    """
    store_vfs = getattr(repo, 'svfs', None)
    if store_vfs:
        return store_vfs
    return getattr(repo, 'sopener')
538 541
def getvfs(repo):
    """Return appropriate object to access files under .hg

    repo.vfs has been available since 2.3 (or 7034365089bf); older
    Mercurial exposes the same thing as 'opener'.
    """
    working_vfs = getattr(repo, 'vfs', None)
    if working_vfs:
        return working_vfs
    return getattr(repo, 'opener')
549 552
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
578 581
579 582 # utilities to clear cache
580 583
def clearfilecache(obj, attrname):
    """Drop a cached @filecache property so the next access recomputes it."""
    getunfiltered = getattr(obj, 'unfiltered', None)
    if getunfiltered is not None:
        # filecache entries live on the unfiltered repo view
        obj = getunfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
588 591
def clearchangelog(repo):
    """Drop any cached changelog so the next access re-reads from disk."""
    if repo is not repo.unfiltered():
        # filtered views keep their own keyed changelog cache; reset it too
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
594 597
595 598 # perf commands
596 599
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the working directory through the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()
605 608
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file F at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()
613 616
@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    """benchmark computing the working directory status"""
    opts = _byteskwargs(opts)
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
625 628
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark an addremove dry-run over the working directory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        # dry_run so the benchmark never mutates the repository
        opts[b'dry_run'] = True
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            # scmutil.addremove grew a uipathfn argument in 5.1
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
643 646
def clearcaches(cl):
    """Clear a changelog/revlog's internal caches, across hg versions."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # older revlogs: reset the node->rev lookup cache by hand
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
652 655
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def s():
        # setup: drop caches so every run recomputes from scratch
        clearcaches(cl)
    def d():
        len(cl.headrevs())
    timer(d, setup=s)
    fm.end()
665 668
@command(b'perftags', formatteropts +
         [
             (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perftags(ui, repo, **opts):
    """benchmark the computation of repository tags"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def s():
        # setup: optionally drop revlog caches, always drop the tags cache
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()
    def t():
        return len(repo.tags())
    timer(t, setup=s)
    fm.end()
684 687
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating over all ancestors of the repository heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        # exhausting the iterator is the measured operation
        for a in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()
695 698
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests of REVSET revs against the ancestor set"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s  # the membership check itself is what we measure
    timer(d)
    fm.end()
708 711
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # fix: convert **opts keys to bytes like every other perf command does;
    # gettimer/hg.peer expect bytes-keyed opts dicts on Python 3
    opts = _byteskwargs(opts)
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # setup: open a fresh peer so each run discovers from scratch
        repos[1] = hg.peer(ui, opts, path)
    def d():
        setdiscovery.findcommonheads(ui, *repos)
    timer(d, setup=s)
    fm.end()
723 726
@command(b'perfbookmarks', formatteropts +
         [
             (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def s():
        # setup: drop cached state so each run re-parses from disk
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')
    def d():
        # attribute access triggers the (re-)parse being measured
        repo._bookmarks
    timer(d, setup=s)
    fm.end()
742 745
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        # benchmark opening the bundle and applying *fn* to it
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # benchmark reading the decoded bundle stream in *size*-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # baseline: raw file reads with no bundle decoding at all
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        # benchmark reading every part's payload in *size*-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # sniff the bundle type once, then register the matching benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
860 863
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # draining the generator is what actually performs the work
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
891 894
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark dirstate directory-map (hasdir) computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # prime the dirstate before measuring
    def d():
        dirstate.hasdir(b'a')
        # drop the cached dir map so the next run rebuilds it
        del dirstate._map._dirs
    timer(d)
    fm.end()
903 906
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark loading the dirstate from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # prime once before measuring
    def d():
        # invalidate so the lookup below re-reads the dirstate
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(d)
    fm.end()
914 917
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark rebuilding the dirstate directory map on each hasdir call"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # prime once before measuring
    def d():
        repo.dirstate.hasdir(b"a")
        # drop the cached dir map so the next run rebuilds it
        del repo.dirstate._map._dirs
    timer(d)
    fm.end()
925 928
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark the construction of the dirstate file fold map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate outside the timed section
    b'a' in dirstate
    def d():
        dirstate._map.filefoldmap.get(b'a')
        # drop the cached property so each run rebuilds the whole map
        del dirstate._map.filefoldmap
    timer(d)
    fm.end()
937 940
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark the construction of the dirstate directory fold map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate outside the timed section
    b'a' in dirstate
    def d():
        dirstate._map.dirfoldmap.get(b'a')
        # clear both caches: dirfoldmap is derived from the _dirs set
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(d)
    fm.end()
950 953
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark serializing the dirstate back to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # prime the dirstate outside the timed section
    b"a" in ds
    def d():
        # force the dirty flag so write() actually serializes something
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()
962 965
@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    """benchmark the merge action computation (no working copy changes)"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()
981 984
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic between two revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve both endpoints once, outside of the timed section
    source = scmutil.revsingle(repo, rev1, rev1)
    destination = scmutil.revsingle(repo, rev2, rev2)
    def trace():
        copies.pathcopies(source, destination)
    timer(trace)
    fm.end()
993 996
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
         ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def d():
        phases = _phases
        if full:
            # with --full, drop the filecache entry so the phase file is
            # re-read from disk as part of the measured work
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()
1012 1015
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    # fetch the remote phase roots once, outside of the timed section
    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    # count how many advertised roots are known locally and non-public,
    # purely informational output to help interpret the timing
    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
1068 1071
@command(b'perfmanifest',[
            (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
            (b'', b'clear-disk', False, b'clear on-disk caches too'),
        ] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset; take its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hex node given directly
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # hg >= 4.8 uses getstorage(); older versions expose _revlog
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        # clear caches each run so we always measure a cold read
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
1104 1107
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changeset entry from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve the node once, outside of the timed section
    node = scmutil.revsingle(repo, rev).node()
    def readone():
        repo.changelog.read(node)
    timer(readone)
    fm.end()
1115 1118
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # reset the dirstate and drop the cached ignore matcher so each
        # timed run rebuilds it from scratch
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        # property access triggers the ignore-file parsing being measured
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1132 1135
@command(b'perfindex', [
            (b'', b'rev', [], b'revision to be looked up (default tip)'),
            (b'', b'no-lookup', None, b'do not revision lookup post creation'),
         ] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    if opts[b'no_lookup']:
        if opts['rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        # building the changelog (index parsing) is part of the measurement
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)
    timer(d, setup=setup)
    fm.end()
1186 1189
@command(b'perfnodemap', [
            (b'', b'rev', [], b'revision to be looked up (default tip)'),
            (b'', b'clear-caches', True, b'clear revlog cache between calls'),
         ] + formatteropts)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts['clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort('use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]
    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        # rebuild a cold nodemap before every timed run
        def setup():
            setnodeget()
    else:
        # build once and prewarm so every run measures warm lookups
        setnodeget()
        d() # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1245 1248
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup time of a `hg version` child process"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        if os.name != r'nt':
            # neutralize HGRCPATH in the child's environment via the shell
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
        else:
            # NOTE(review): mutates os.environ without restoring it —
            # presumably acceptable for a benchmark process; verify
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
    timer(d)
    fm.end()
1259 1262
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    # resolve the nodes up-front so only parents() is measured
    nl = [repo.changelog.node(i) for i in _xrange(count)]
    def d():
        for n in nl:
            repo.changelog.parents(n)
    timer(d)
    fm.end()
1283 1286
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of one changeset"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    def countfiles():
        len(repo[rev].files())
    timer(countfiles)
    fm.end()
1293 1296
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the file list straight from raw changelog data"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    changelog = repo.changelog
    def countrawfiles():
        # index 3 of the parsed changelog entry is the file list
        len(changelog.read(rev)[3])
    timer(countrawfiles)
    fm.end()
1304 1307
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision identifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def lookup():
        return len(repo.lookup(rev))
    timer(lookup)
    fm.end()
1311 1314
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
         ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark applying a sequence of random edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed keeps the generated edit sequence reproducible across runs
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    # pre-generate all the edits so only replacelines() is measured
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1345 1348
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark parsing and evaluating a set of revset specs"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def resolve():
        return len(scmutil.revrange(repo, specs))
    timer(resolve)
    fm.end()
1353 1356
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark a cold node-to-rev lookup on a fresh changelog revlog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    # open the changelog revlog directly, bypassing repo-level caches
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(n)
        # wipe the revlog caches so every run is a cold lookup
        clearcaches(cl)
    timer(d)
    fm.end()
1367 1370
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark a full `hg log` run (output captured and discarded)"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev=[]
    timer, fm = gettimer(ui, opts)
    # buffer the output so terminal printing does not pollute the timing
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
                               copies=opts.get(b'rename')))
    ui.popbuffer()
    fm.end()
1381 1384
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        # iterate from tip down to revision 0
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            ctx.branch() # read changelog data (in addition to the index)
    timer(moonwalk)
    fm.end()
1396 1399
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template

    Rendering goes to the null device so output cost is excluded from the
    measurement. Uses ``--rev`` to restrict the revisions (default: all).
    """
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    nullui = ui.copy()
    # keep a handle on the null-device file so it can be closed afterwards;
    # the previous code leaked this file descriptor
    devnull = open(os.devnull, r'wb')
    try:
        nullui.fout = devnull
        nullui.disablepager()
        revs = opts.get(b'rev')
        if not revs:
            revs = [b'all()']
        revs = list(scmutil.revrange(repo, revs))

        defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                           b' {author|person}: {desc|firstline}\n')
        if testedtemplate is None:
            testedtemplate = defaulttemplate
        displayer = makelogtemplater(nullui, repo, testedtemplate)
        def format():
            for r in revs:
                ctx = repo[r]
                displayer.show(ctx)
                displayer.flush(ctx)

        timer, fm = gettimer(ui, opts)
        timer(format)
        fm.end()
    finally:
        devnull.close()
1430 1433
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
         ])
def perfhelperpathcopies(ui, repo, revs=None, **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    if dotiming:
        # the "time" value is rendered with "%18.5f" below, so give its
        # header the same width to keep the columns aligned
        header = '%12s %12s %12s %12s %12s %18s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    # `revs` defaults to None (not a mutable `[]` default); absent or empty
    # means "consider every revision"
    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # only merge commits are interesting for copy tracing measurement
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
1506 1509
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def buildauditor():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)
    timer(buildauditor)
    fm.end()
1513 1516
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    def load():
        store.fncache._load()
    timer(load)
    fm.end()
1523 1526
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache inside a transaction

    The lock and the transaction are now released even when the timed
    section raises; the previous code leaked both on error.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        try:
            tr.addbackup(b'fncache')
            def d():
                # force the dirty flag so write() actually serializes
                s.fncache._dirty = True
                s.fncache.write(tr)
            timer(d)
            tr.close()
        finally:
            # no-op after a successful close(), aborts on error
            tr.release()
    finally:
        lock.release()
    fm.end()
1540 1543
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently stored in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    # load the cache outside the timed section
    s.fncache._load()
    def d():
        for p in s.fncache.entries:
            s.encode(p)
    timer(d)
    fm.end()
1552 1555
def _bdiffworker(q, blocks, xdiff, ready, done):
    """worker loop used by the threaded mode of `perfbdiff`

    Pulls text pairs from ``q`` and diffs each one until a ``None``
    sentinel is received, then blocks on ``ready`` so all workers restart
    the next timing run in sync. Exits once ``done`` is set.
    """
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            # same three diff flavors as the single-threaded path
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()
1568 1571
def _manifestrevision(repo, mnode):
    """Return the raw stored revision text for manifest node ``mnode``."""
    ml = repo.manifestlog
    # hg >= 4.8 exposes getstorage(); older versions keep a _revlog attribute
    storage = (ml.getstorage(b'')
               if util.safehasattr(ml, b'getstorage')
               else ml._revlog)
    return storage.revision(mnode)
1578 1581
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the positional FILE argument is actually the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # gather all the text pairs up-front so only the diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        # wait for the workers to consume the initial sentinels so thread
        # startup does not count toward the measurement
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            # one sentinel per worker terminates this run
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # signal shutdown and wake every worker so the threads can exit
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1679 1682
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the positional FILE argument is actually the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # gather all the text pairs up-front so only the diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1745 1748
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # map of single-letter flags to the matching `hg diff` keyword option
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    # time the diff once for each whitespace-option combination
    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = dict((options[c], b'1') for c in diffopt)
        # d closes over `opts` late, but it is timed before the next
        # iteration rebinds it, so each run sees the intended options
        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()
        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()
1767 1770
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # the first 4 bytes of the index hold flags (high 16 bits) and the
    # format version (low 16 bits)
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    # nodes at various depths of the revlog, to benchmark lookup cost
    # at different positions
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        # cost of instantiating a revlog object
        revlog.revlog(opener, indexfile)

    def read():
        # raw cost of reading the index file from the opener
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        # cost of parsing the raw index data
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        # parse the index then fetch one entry
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        # parse the index then fetch every requested entry, `count` times
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        # node -> rev resolution through the nodemap
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        # resolve a batch of nodes, `count` times (2x runs measure caching)
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    # (benchmark callable, display title) pairs
    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1885 1888
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            # walk from the tip down to the start revision instead
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in _xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1927 1930
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
         ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revision numbers count from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # fixed: the message previously read "invalide run count"
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    # each run yields the same revisions in the same order, so the timings
    # for a given revision can be grouped across runs
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # fixed: the 50th percentile was previously computed with "* 70"
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
2035 2038
2036 2039 class _faketr(object):
2037 2040 def add(s, x, y, z=None):
2038 2041 return None
2039 2042
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    """Time the re-addition of revisions [startrev, stoprev] of `orig`.

    The revisions are written into a truncated temporary copy of the revlog
    (see ``_temprevlog``) using a no-op transaction.  Returns a list of
    (rev, timing) pairs, one per revision written.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            def updateprogress(pos):
                progress.update(pos)
            def completeprogress():
                progress.complete()
        else:
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            # only the addrawrevision call itself is timed; seed computation
            # and cache clearing happen outside the timed section
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
2076 2079
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) pair needed to re-add revision `rev`.

    The returned pair is meant to be fed to ``addrawrevision(*args,
    **kwargs)``.  Depending on `source`, the content is supplied either as
    a full text or as a cached delta against some base revision.
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    text = None
    cachedelta = None

    if source == b'full':
        # provide the full text and let the revlog compute its own delta
        text = orig.revision(rev)
    elif source == b'parent-1':
        # delta against the first parent
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # delta against the second parent, falling back to the first parent
        # when there is no second parent
        base = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(base), orig.revdiff(base, rev))
    elif source == b'parent-smallest':
        # delta against whichever parent yields the smaller delta; ties are
        # resolved in favor of the first parent
        base = p1
        diff = orig.revdiff(p1, rev)
        if p2 != nullid:
            otherdiff = orig.revdiff(p2, rev)
            if len(otherdiff) < len(diff):
                base = p2
                diff = otherdiff
        cachedelta = (orig.rev(base), diff)
    elif source == b'storage':
        # reuse the delta already stored in the original revlog
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2115 2118
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Context manager yielding a writable temporary copy of `orig`.

    The copy lives in a temporary directory and is truncated so that it
    only contains revisions strictly before `truncaterev`, leaving room to
    re-add (and time) the later revisions.  The directory is removed on
    exit.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        # 'ab' keeps existing content; seek(0) then truncate() cuts the file
        # at the computed offset without rewriting the retained part
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
2162 2165
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # default to every available engine that can actually compress
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # file handle on the file actually holding the chunk data
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        # read each revision's segment, opening a file handle every time
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # same as doread but reusing a single file handle
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # read every revision in one segment request
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        # read + decompress each revision individually
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        # recompress the chunks collected by dochunkbatch with `compressor`
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2280 2283
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # with -c/-m the positional argument is the revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # slice the raw segment data into per-revision chunks, mirroring
        # what the revlog does internally
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with the data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved between modules over time; fall back for older versions
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute the inputs each phase needs so that each benchmark only
    # measures its own phase
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2416 2419
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building
    the volatile revision set caches on the revset execution. The volatile
    caches hold data related to filtering and obsolescence."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            # also pay the cost of building a changectx per revision
            for ctx in repo.set(expr): pass
        else:
            for r in repo.revs(expr): pass
    timer(d)
    fm.end()
2439 2442
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
         ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def getobs(name):
        # benchmark closure for one obsolescence-related set
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)
        return d

    # restrict to the requested names, if any were given
    allobs = sorted(obsolete.cachefuncs)
    if names:
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        # benchmark closure for one repoview filter computation
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)
        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()
2481 2484
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
         ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                view._branchcaches.clear()
            else:
                # drop only this filter's cache so the subset can be reused
                filtered.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset is not itself pending, so that subsets
        # are always benchmarked before their supersets
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap reads and writes so only the in-memory
    # update is measured; restored in the finally block below
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2560 2563
@command(b'perfbranchmapupdate', [
     (b'', b'base', [], b'subset of revision to start from'),
     (b'', b'target', [], b'subset of revision to end with'),
     (b'', b'clear-caches', False, b'clear cache between each runs')
    ] + formatteropts)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None] # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the revisions the benchmarked update will have to process
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # register temporary filters exposing the base and target views;
        # unregistered again in the finally block below
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset was found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # start every run from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2664 2667
@command(b'perfbranchmapload', [
     (b'f', b'filter', b'', b'Specify repoview filter'),
     # fixed: help string previously read "brachmap"
     (b'', b'list', False, b'List branchmap filter caches'),
     (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),

    ] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    # with --list, just report which branchmap caches exist on disk
    if list:
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap() # make sure we have a relevant, up to date branchmap

    # branchcache.fromfile replaced the module-level read() function
    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        # fall back to the nearest cached subset of the requested filter
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        fromfile(repo)
    timer(bench, setup=setup)
    fm.end()
2715 2718
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)

    def loadmarkers():
        # parsing happens inside the obsstore constructor; the returned
        # length (marker count) becomes the reported result
        return len(obsolete.obsstore(svfs))

    timer(loadmarkers)
    fm.end()
2725 2728
@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
     (b'', b'mincost', 0, b'smallest cost of items in cache'),
     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
     (b'', b'size', 4, b'size of cache'),
     (b'', b'gets', 10000, b'number of key lookups'),
     (b'', b'sets', 10000, b'number of key sets'),
     (b'', b'mixed', 10000, b'number of mixed mode operations'),
     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    opts = _byteskwargs(opts)

    def doinit():
        # raw construction cost of the cache object itself
        for _ in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    # one random key per cache slot
    values = [random.randint(0, _maxint) for _ in _xrange(size)]

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = [random.choice(values) for _ in _xrange(gets)]

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for _ in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for _ in _xrange(mixed):
        # op 0 is a lookup, op 1 is an insertion
        op = 0 if random.randint(0, 100) < mixedgetfreq else 1
        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # with a cost limit the cost-aware variants are measured instead of the
    # plain ones
    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2856 2859
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def write():
        # use _xrange for consistency with the other perf commands in this
        # file: on Python 2 a bare range(100000) would build a list inside
        # the timed section and skew the measurement
        for i in _xrange(100000):
            ui.write(b'Testing write performance\n')
    timer(write)
    fm.end()
2869 2872
def uisetup(ui):
    hasopenrevlog = util.safehasattr(cmdutil, b'openrevlog')
    hasdebugrevlogopts = util.safehasattr(commands, b'debugrevlogopts')
    if not hasopenrevlog or hasdebugrevlogopts:
        return

    # for "historical portability":
    # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
    # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
    # openrevlog() should cause failure, because it has been
    # available since 3.5 (or 49c583ca48c4).
    def openrevlog(orig, repo, cmd, file_, opts):
        if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
            raise error.Abort(b"This version doesn't support --dir option",
                              hint=b"use 3.5 or later")
        return orig(repo, cmd, file_, opts)

    extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2884 2887
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        # drive one full progress bar from 0 up to `total`, one increment
        # per step
        with ui.makeprogress(topic, total=total) as progress:
            for _ in pycompat.xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now