##// END OF EJS Templates
py3: use range() instead of xrange()...
Pulkit Goyal -
r42562:c2d10506 default
parent child Browse files
Show More
@@ -1,2904 +1,2904 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
12 12 worst, median, average. If not set only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
16 16 number of seconds to wait before any group of runs (default: 1)
17 17
18 18 ``pre-run``
19 19 number of runs to perform before starting measurement.
20 20
21 21 ``profile-benchmark``
22 22 Enable profiling for the benchmarked section.
23 23 (The first iteration is benchmarked)
24 24
25 25 ``run-limits``
26 26 Control the number of runs each benchmark will perform. The option value
27 27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 28 conditions are considered in order with the following logic:
29 29
30 30 If benchmark has been running for <time> seconds, and we have performed
31 31 <numberofrun> iterations, stop the benchmark,
32 32
33 33 The default value is: `3.0-100, 10.0-3`
34 34
35 35 ``stub``
36 36 When set, benchmarks will only be run once, useful for testing
37 37 (default: off)
38 38 '''
39 39
40 40 # "historical portability" policy of perf.py:
41 41 #
42 42 # We have to do:
43 43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 46 # - make historical perf command work correctly with as wide Mercurial
47 47 # version as possible
48 48 #
49 49 # We have to do, if possible with reasonable cost:
50 50 # - make recent perf command for historical feature work correctly
51 51 # with early Mercurial
52 52 #
53 53 # We don't have to do:
54 54 # - make perf command for recent feature work correctly with early
55 55 # Mercurial
56 56
57 57 from __future__ import absolute_import
58 58 import contextlib
59 59 import functools
60 60 import gc
61 61 import os
62 62 import random
63 63 import shutil
64 64 import struct
65 65 import sys
66 66 import tempfile
67 67 import threading
68 68 import time
69 69 from mercurial import (
70 70 changegroup,
71 71 cmdutil,
72 72 commands,
73 73 copies,
74 74 error,
75 75 extensions,
76 76 hg,
77 77 mdiff,
78 78 merge,
79 79 revlog,
80 80 util,
81 81 )
82 82
83 83 # for "historical portability":
84 84 # try to import modules separately (in dict order), and ignore
85 85 # failure, because these aren't available with early Mercurial
86 86 try:
87 87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 88 except ImportError:
89 89 pass
90 90 try:
91 91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 92 except ImportError:
93 93 pass
94 94 try:
95 95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 96 dir(registrar) # forcibly load it
97 97 except ImportError:
98 98 registrar = None
99 99 try:
100 100 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 101 except ImportError:
102 102 pass
103 103 try:
104 104 from mercurial.utils import repoviewutil # since 5.0
105 105 except ImportError:
106 106 repoviewutil = None
107 107 try:
108 108 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 109 except ImportError:
110 110 pass
111 111 try:
112 112 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 113 except ImportError:
114 114 pass
115 115
116 116 try:
117 117 from mercurial import profiling
118 118 except ImportError:
119 119 profiling = None
120 120
def identity(a):
    """Return *a* unchanged.

    Fallback used in place of the pycompat conversion helpers when they
    are unavailable (older Mercurial)."""
    return a
123 123
124 124 try:
125 125 from mercurial import pycompat
126 126 getargspec = pycompat.getargspec # added to module after 4.5
127 127 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
128 128 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
129 129 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
130 130 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
131 131 if pycompat.ispy3:
132 132 _maxint = sys.maxsize # per py3 docs for replacing maxint
133 133 else:
134 134 _maxint = sys.maxint
135 135 except (ImportError, AttributeError):
136 136 import inspect
137 137 getargspec = inspect.getargspec
138 138 _byteskwargs = identity
139 139 fsencode = identity # no py3 support
140 140 _maxint = sys.maxint # no py3 support
141 141 _sysstr = lambda x: x # no py3 support
142 142 _xrange = xrange
143 143
144 144 try:
145 145 # 4.7+
146 146 queue = pycompat.queue.Queue
147 147 except (AttributeError, ImportError):
148 148 # <4.7.
149 149 try:
150 150 queue = pycompat.queue
151 151 except (AttributeError, ImportError):
152 152 queue = util.queue
153 153
154 154 try:
155 155 from mercurial import logcmdutil
156 156 makelogtemplater = logcmdutil.maketemplater
157 157 except (AttributeError, ImportError):
158 158 try:
159 159 makelogtemplater = cmdutil.makelogtemplater
160 160 except (AttributeError, ImportError):
161 161 makelogtemplater = None
162 162
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()
def safehasattr(thing, attr):
    """Return True when *thing* has attribute *attr* (bytes name)."""
    sentinel = _undefined
    return getattr(thing, _sysstr(attr), sentinel) is not sentinel
169 169 setattr(util, 'safehasattr', safehasattr)
170 170
171 171 # for "historical portability":
172 172 # define util.timer forcibly, because util.timer has been available
173 173 # since ae5d60bb70c9
174 174 if safehasattr(time, 'perf_counter'):
175 175 util.timer = time.perf_counter
176 176 elif os.name == b'nt':
177 177 util.timer = time.clock
178 178 else:
179 179 util.timer = time.time
180 180
181 181 # for "historical portability":
182 182 # use locally defined empty option list, if formatteropts isn't
183 183 # available, because commands.formatteropts has been available since
184 184 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
185 185 # available since 2.2 (or ae5f92e154d3)
186 186 formatteropts = getattr(cmdutil, "formatteropts",
187 187 getattr(commands, "formatteropts", []))
188 188
189 189 # for "historical portability":
190 190 # use locally defined option list, if debugrevlogopts isn't available,
191 191 # because commands.debugrevlogopts has been available since 3.7 (or
192 192 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
193 193 # since 1.9 (or a79fea6b3e77).
194 194 revlogopts = getattr(cmdutil, "debugrevlogopts",
195 195 getattr(commands, "debugrevlogopts", [
196 196 (b'c', b'changelog', False, (b'open changelog')),
197 197 (b'm', b'manifest', False, (b'open manifest')),
198 198 (b'', b'dir', False, (b'open directory manifest')),
199 199 ]))
200 200
201 201 cmdtable = {}
202 202
# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a command spec such as b"name|alias1|alias2" into a list."""
    names = cmd.split(b"|")
    return names
208 208
209 209 if safehasattr(registrar, 'command'):
210 210 command = registrar.command(cmdtable)
211 211 elif safehasattr(cmdutil, 'command'):
212 212 command = cmdutil.command(cmdtable)
213 213 if b'norepo' not in getargspec(command).args:
214 214 # for "historical portability":
215 215 # wrap original cmdutil.command, because "norepo" option has
216 216 # been available since 3.1 (or 75a96326cecb)
217 217 _command = command
218 218 def command(name, options=(), synopsis=None, norepo=False):
219 219 if norepo:
220 220 commands.norepo += b' %s' % b' '.join(parsealiases(name))
221 221 return _command(name, list(options), synopsis)
222 222 else:
223 223 # for "historical portability":
224 224 # define "@command" annotation locally, because cmdutil.command
225 225 # has been available since 1.9 (or 2daa5179e73f)
226 226 def command(name, options=(), synopsis=None, norepo=False):
227 227 def decorator(func):
228 228 if synopsis:
229 229 cmdtable[name] = func, list(options), synopsis
230 230 else:
231 231 cmdtable[name] = func, list(options)
232 232 if norepo:
233 233 commands.norepo += b' %s' % b' '.join(parsealiases(name))
234 234 return func
235 235 return decorator
236 236
237 237 try:
238 238 import mercurial.registrar
239 239 import mercurial.configitems
240 240 configtable = {}
241 241 configitem = mercurial.registrar.configitem(configtable)
242 242 configitem(b'perf', b'presleep',
243 243 default=mercurial.configitems.dynamicdefault,
244 244 )
245 245 configitem(b'perf', b'stub',
246 246 default=mercurial.configitems.dynamicdefault,
247 247 )
248 248 configitem(b'perf', b'parentscount',
249 249 default=mercurial.configitems.dynamicdefault,
250 250 )
251 251 configitem(b'perf', b'all-timing',
252 252 default=mercurial.configitems.dynamicdefault,
253 253 )
254 254 configitem(b'perf', b'pre-run',
255 255 default=mercurial.configitems.dynamicdefault,
256 256 )
257 257 configitem(b'perf', b'profile-benchmark',
258 258 default=mercurial.configitems.dynamicdefault,
259 259 )
260 260 configitem(b'perf', b'run-limits',
261 261 default=mercurial.configitems.dynamicdefault,
262 262 )
263 263 except (ImportError, AttributeError):
264 264 pass
265 265
def getlen(ui):
    """Return the len() builtin, or a constant-1 stub when perf.stub is set.

    The stub keeps benchmark output shape identical while skipping the
    cost of materializing large results during test runs."""
    if ui.configbool(b"perf", b"stub", False):
        return lambda seq: 1
    return len
270 270
class noop(object):
    """Context manager that does nothing.

    Stands in for a profiler context when profiling is disabled, so the
    benchmark loop can unconditionally use a ``with`` block."""
    def __enter__(self):
        return None

    def __exit__(self, *exc_info):
        # returning None (falsy) lets exceptions propagate
        return None
277 277
278 278 NOOPCTX = noop()
279 279
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands.

    The timer returned is a functools.partial around _timer (or around
    stub_timer when perf.stub is set) pre-loaded with the formatter and
    the perf.* configuration (all-timing, run-limits, pre-run,
    profile-benchmark)."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                # always falsy: mimics plainformatter so callers can test
                # "if fm:" to detect a real (structured-output) formatter
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry is b'<seconds>-<minimum run count>'; malformed entries are
    # warned about and skipped rather than aborting the benchmark
    # NOTE(review): this parsing uses pycompat directly, which is only bound
    # when the module-level pycompat import succeeded — presumably fine since
    # run-limits is a modern-only config; confirm for very old Mercurial.
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n'
                     % item))
            continue
        try:
            time_limit = float(pycompat.sysstr(parts[0]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (pycompat.bytestr(e), item)))
            continue
        try:
            run_limit = int(pycompat.sysstr(parts[1]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (pycompat.bytestr(e), item)))
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # experimental config: perf.profile-benchmark (profiles first iteration)
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
                          prerun=prerun, profiler=profiler)
    return t, fm
379 379
def stub_timer(fm, func, setup=None, title=None):
    """Timer replacement for perf.stub mode: run *func* exactly once.

    *fm* and *title* are accepted for signature compatibility with
    _timer but are not used."""
    if setup is not None:
        setup()
    func()
384 384
@contextlib.contextmanager
def timeone():
    """Time one block; yields a list that receives a single tuple.

    On exit, a ``(wall, user, sys)`` triple of elapsed times for the
    managed block is appended to the yielded list."""
    measured = []
    oldtimes = os.times()
    wallstart = util.timer()
    yield measured
    wallstop = util.timer()
    newtimes = os.times()
    measured.append((wallstop - wallstart,
                     newtimes[0] - oldtimes[0],
                     newtimes[1] - oldtimes[1]))
395 395
396 396
397 397 # list of stop condition (elapsed time, minimal run count)
398 398 DEFAULTLIMITS = (
399 399 (3.0, 100),
400 400 (10.0, 3),
401 401 )
402 402
def _timer(fm, func, setup=None, title=None, displayall=False,
           limits=DEFAULTLIMITS, prerun=0, profiler=None):
    """Repeatedly benchmark *func* and report the timings through *fm*.

    Runs *setup* (if any) before each call. After *prerun* warm-up calls,
    keeps calling *func* until one of the ``(seconds, mincount)`` pairs in
    *limits* is satisfied. The first measured iteration runs under
    *profiler* when one is given. The last return value of *func* is
    forwarded to formatone() as the benchmark's result."""
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # Scrape artifact fix: the source retained both the old (xrange) and new
    # (range) versions of this line from the diff view; the committed form
    # ("py3: use range() instead of xrange()", r42562) uses range().
    for i in range(prerun):
        # warm-up iterations: executed but never measured
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # only the first measured iteration is profiled
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)
435 435
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark entry to formatter *fm*.

    *timings* is a list of ``(wall, user, sys)`` tuples; it is sorted in
    place. The best timing is always shown; with *displayall* the max,
    average and median rows are added as well."""

    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        # non-"best" rows get a "<role>." prefix on their field names
        prefix = b'' if role == b'best' else b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    display(b'best', timings[0])
    if displayall:
        display(b'max', timings[-1])
        display(b'avg', tuple([sum(col) / count for col in zip(*timings)]))
        display(b'median', timings[len(timings) // 2])
467 467
468 468 # utilities for historical portability
469 469
def getint(ui, section, name, default):
    """Read config value ``section.name`` as an int, or *default* if unset.

    Raises error.ConfigError when the value is present but not an
    integer."""
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, raw))
481 481
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    # capture the current value so restore() can undo any set() later
    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        # small handle exposing set()/restore() for the captured attribute
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
511 511
512 512 # utilities to examine each internal API changes
513 513
def getbranchmapsubsettable():
    """Locate the branch-cache 'subsettable' mapping across hg versions.

    Aborts when running against a Mercurial too old (or from a bisect
    window) where the attribute does not exist anywhere."""
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
530 530
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    storevfs = getattr(repo, 'svfs', None)
    if storevfs:
        return storevfs
    # pre-2.3 fallback; raises AttributeError if neither exists
    return getattr(repo, 'sopener')
541 541
def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    repovfs = getattr(repo, 'vfs', None)
    if repovfs:
        return repovfs
    # pre-2.3 fallback; raises AttributeError if neither exists
    return getattr(repo, 'opener')
552 552
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    # older APIs: the cache is a plain attribute that can be reset to None
    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
581 581
582 582 # utilities to clear cache
583 583
def clearfilecache(obj, attrname):
    """Drop *attrname* from obj's filecache so it is recomputed on access.

    Operates on the unfiltered view when the object exposes one."""
    getunfiltered = getattr(obj, 'unfiltered', None)
    if getunfiltered is not None:
        obj = getunfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
591 591
def clearchangelog(repo):
    """Drop cached changelog state so the next access re-reads it."""
    if repo is not repo.unfiltered():
        # NOTE(review): _clcache/_clcachekey appear to be the filtered
        # view's changelog cache slots; object.__setattr__ bypasses any
        # property machinery — confirm against repoview internals.
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
597 597
598 598 # perf commands
599 599
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the dirstate with the given patterns"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    # len(list(...)) forces the walk generator to be fully consumed
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()
608 608
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file F at revision '.' (the working parent)"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    # len() consumes the annotation result so the full work is measured
    timer(lambda: len(fc.annotate(True)))
    fm.end()
616 616
@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    """benchmark a full repo.status() call"""
    opts = _byteskwargs(opts)
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    # summing the category lengths forces every status list to materialize
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
628 628
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the working copy"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Fix: read oldquiet BEFORE entering the try block.  Previously it was
    # assigned inside the try, so a failure on that very line would make the
    # finally clause raise UnboundLocalError and mask the real exception.
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            # modern signature (uipathfn added for path display)
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
646 646
def clearcaches(cl):
    """Clear a changelog/revlog's lookup caches across hg versions."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # older API: reset the node->rev cache to its initial state
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
655 655
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def s():
        # setup: drop changelog caches so each run recomputes from scratch
        clearcaches(cl)
    def d():
        len(cl.headrevs())
    timer(d, setup=s)
    fm.end()
668 668
@command(b'perftags', formatteropts+
         [
             (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perftags(ui, repo, **opts):
    """benchmark computing the repository tags"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def s():
        # setup: always drop the tags cache; optionally also drop the
        # changelog/manifest caches so revlog reading is included
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()
    def t():
        return len(repo.tags())
    timer(t, setup=s)
    fm.end()
687 687
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating over all ancestors of the current heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        # exhaust the ancestors iterator; individual revs are unused
        for a in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()
698 698
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests of REVSET revs in the heads' ancestor set"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s  # membership test only; result intentionally discarded
    timer(d)
    fm.end()
711 711
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # Consistency fix: every other perf command converts its kwargs with
    # _byteskwargs; without this, gettimer()/hg.peer() received str-keyed
    # opts on Python 3.
    opts = _byteskwargs(opts)
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # setup: open a fresh peer for each run so connection state is cold
        repos[1] = hg.peer(ui, opts, path)
    def d():
        setdiscovery.findcommonheads(ui, *repos)
    timer(d, setup=s)
    fm.end()
726 726
@command(b'perfbookmarks', formatteropts +
         [
             (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def s():
        # setup: drop the cached bookmarks (and optionally revlog caches)
        # so each run re-reads them from disk
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')
    def d():
        repo._bookmarks  # attribute access triggers the (re)parse
    timer(d, setup=s)
    fm.end()
745 745
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # each maker below returns a zero-argument callable that re-opens the
    # bundle file from scratch, so every timed run starts cold
    def makebench(fn):
        def run():
            # NOTE(review): open() is called with a bytes mode (b'rb'),
            # which Python 3's open() rejects — presumably py2-era code;
            # confirm against the version of perf.py in use.
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # read the whole decoded bundle stream in *size*-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # baseline: raw file reads with no bundle decoding at all
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # sniff the bundle type once to decide which decoder benchmarks apply
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
863 863
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # NOTE: relies on the private _generatechangelog API; the chunk
        # generator is exhausted so the full generation cost is measured
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
894 894
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark rebuilding the dirstate's directory set"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # force the dirstate to be loaded before timing
    def d():
        dirstate.hasdir(b'a')
        # drop the cached directory structure so the next run rebuilds it
        del dirstate._map._dirs
    timer(d)
    fm.end()
906 906
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark reloading the dirstate plus one containment check"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # force the dirstate to be loaded before timing
    def d():
        repo.dirstate.invalidate()
        b"a" in repo.dirstate  # triggers a full re-read after invalidation
    timer(d)
    fm.end()
917 917
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark dirstate.hasdir with a cold directory cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # force the dirstate to be loaded before timing
    def d():
        repo.dirstate.hasdir(b"a")
        # drop the cached directory structure so the next run rebuilds it
        del repo.dirstate._map._dirs
    timer(d)
    fm.end()
928 928
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark rebuilding the file fold map of the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b'a' in ds

    def d():
        ds._map.filefoldmap.get(b'a')
        # drop the property cache so each run recomputes the map
        del ds._map.filefoldmap

    timer(d)
    fm.end()
940 940
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark rebuilding the directory fold map of the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b'a' in ds

    def d():
        ds._map.dirfoldmap.get(b'a')
        # drop both cached maps so each run pays the full rebuild cost
        del ds._map.dirfoldmap
        del ds._map._dirs

    timer(d)
    fm.end()
953 953
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark writing the dirstate out to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds

    def d():
        # flag dirty so write() actually serializes something
        ds._dirty = True
        ds.write(repo.currenttransaction())

    timer(d)
    fm.end()
965 965
@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    """benchmark computing the merge actions against REV"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # stat working-directory files once now so the timed runs only
    # measure the merge computation itself
    wctx.dirty()

    def d():
        # acceptremote=True avoids interactive prompts mid-benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)

    timer(d)
    fm.end()
984 984
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    timer(lambda: copies.pathcopies(ctx1, ctx2))
    fm.end()
996 996
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
          ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cache = repo._phasecache
    full = opts.get(b'full')

    def d():
        phases = cache
        if full:
            # with --full, also pay the cost of re-reading the phase
            # roots from disk on every run
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(d)
    fm.end()
1015 1015
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    # items() instead of iteritems(): dicts have no iteritems() on Python 3,
    # and items() behaves identically for this read-only traversal on Python 2
    for nhex, phase in remotephases.items():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
1071 1071
@command(b'perfmanifest',[
    (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
    (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # argument names a changeset; benchmark its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        mnode = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hex manifest node given directly
            mnode = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    mnode = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    mnode = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')

    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[mnode].read()

    timer(d)
    fm.end()
1107 1107
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changeset from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def d():
        repo.changelog.read(node)

    timer(d)
    fm.end()
1118 1118
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate

    def setupone():
        # start every run with a fresh, unparsed ignore matcher
        ds.invalidate()
        clearfilecache(ds, b'_ignore')

    def runone():
        ds._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1135 1135
@command(b'perfindex', [
    (b'', b'rev', [], b'revision to be looked up (default tip)'),
    (b'', b'no-lookup', None, b'do not revision lookup post creation'),
] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # after _byteskwargs() all option keys are bytes; the former str
        # key lookup opts['rev'] raised KeyError on Python 3
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)
    timer(d, setup=setup)
    fm.end()
1189 1189
@command(b'perfnodemap', [
    (b'', b'rev', [], b'revision to be looked up (default tip)'),
    (b'', b'clear-caches', True, b'clear revlog cache between calls'),
] + formatteropts)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # after _byteskwargs() all option keys are bytes; the former str key
    # lookup opts['clear_caches'] raised KeyError on Python 3
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]
    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        def setup():
            setnodeget()
    else:
        setnodeget()
        d() # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1248 1248
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup time of a bare `hg version` invocation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        if os.name == r'nt':
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
        else:
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))

    timer(d)
    fm.end()
1262 1262
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodes = [repo.changelog.node(i) for i in _xrange(count)]

    def d():
        for node in nodes:
            repo.changelog.parents(node)

    timer(d)
    fm.end()
1286 1286
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the files touched by one changeset"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo[rev].files()))
    fm.end()
1296 1296
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw files list of one changeset"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    # field 3 of a changelog entry is the list of touched files
    timer(lambda: len(cl.read(rev)[3]))
    fm.end()
1307 1307
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        len(repo.lookup(rev))

    timer(d)
    fm.end()
1314 1314
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
          ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark applying a long random edit sequence to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: every invocation benchmarks the same edit sequence
    random.seed(0)
    randint = random.randint
    nlines = 0
    editlist = []
    for rev in _xrange(edits):
        a1 = randint(0, nlines)
        a2 = randint(a1, min(nlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        nlines += (b2 - b1) - (a2 - a1)
        editlist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in editlist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1348 1348
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving a revision range specification"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # bind the function once so attribute lookup is not part of the timing
    revrange = scmutil.revrange

    def d():
        len(revrange(repo, specs))

    timer(d)
    fm.end()
1356 1356
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark resolving a node to a revision in a fresh changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    node = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")

    def d():
        cl.rev(node)
        # wipe revlog caches so the next run starts cold again
        clearcaches(cl)

    timer(d)
    fm.end()
1370 1370
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running the `log` command"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # swallow the command output; we only care about the timing
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
                               copies=opts.get(b'rename')))
    ui.popbuffer()
    fm.end()
1384 1384
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[rev]
            ctx.branch() # read changelog data (in addition to the index)

    timer(moonwalk)
    fm.end()
1399 1399
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render into a silenced copy of the UI so only formatting is measured
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def render():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(render)
    fm.end()
1433 1433
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
         ])
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                # native-str keys throughout: this dict is expanded with
                # fm.data(**data) (keyword names must be str on Python 3)
                # and looked up by the native-str %-format patterns above;
                # bytes keys raised TypeError/KeyError under Python 3
                data = {
                    'source': base.hex(),
                    'destination': parent.hex(),
                    'nbrevs': len(repo.revs('%d::%d', b, p)),
                    'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
1509 1509
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(d)
    fm.end()
1516 1516
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    timer(lambda: store.fncache._load())
    fm.end()
1526 1526
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark rewriting the fncache file inside a transaction"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    lock = repo.lock()
    store.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')

    def d():
        # flag dirty so write() actually rewrites the file
        store.fncache._dirty = True
        store.fncache.write(tr)

    timer(d)
    tr.close()
    lock.release()
    fm.end()
1543 1543
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    # load outside of the timed section
    store.fncache._load()

    def d():
        for path in store.fncache.entries:
            store.encode(path)

    timer(d)
    fm.end()
1555 1555
def _bdiffworker(q, blocks, xdiff, ready, done):
    """worker loop for threaded perfbdiff runs

    Pulls text pairs off *q* and diffs them until a None sentinel is seen,
    then parks on *ready* until the main thread wakes the workers again.
    Exits once *done* is set.
    """
    while not done.is_set():
        job = q.get()
        while job is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*job)
            elif blocks:
                mdiff.bdiff.blocks(*job)
            else:
                mdiff.textdiff(*job)
            q.task_done()
            job = q.get()
        q.task_done() # acknowledge the None sentinel too
        with ready:
            ready.wait()
1571 1571
def _manifestrevision(repo, mnode):
    """Return the raw manifest text for manifest node *mnode*."""
    mlog = repo.manifestlog

    # modern hg exposes per-tree storage; older versions keep a revlog
    if util.safehasattr(mlog, b'getstorage'):
        store = mlog.getstorage(b'')
    else:
        store = mlog._revlog

    return store.revision(mnode)
1581 1581
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    pairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Treat the revision as a changeset: gather manifest pairs...
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                pairs.append((pman, mtext))

            # ...and filelog pairs, found by walking the manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                pairs.append((f1, f2))
        else:
            # default: diff each revision against its delta parent
            dp = r.deltaparent(rev)
            pairs.append((r.revision(dp), r.revision(rev)))

    usethreads = threads > 0
    if not usethreads:
        def d():
            for pair in pairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        # spin up the worker pool; feed one None per worker so every
        # thread parks on the condition before timing starts
        q = queue()
        for _i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for _i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()

        def d():
            for pair in pairs:
                q.put(pair)
            for _i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if usethreads:
        # wake the parked workers one last time so they can observe done
        done.set()
        for _i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1682 1682
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    pairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Treat the revision as a changeset: gather manifest pairs...
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                pairs.append((pman, mtext))

            # ...and filelog pairs, found by walking the manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                pairs.append((f1, f2))
        else:
            # default: diff each revision against its delta parent
            dp = r.deltaparent(rev)
            pairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in pairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1748 1748
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # map each single-letter flag to the matching diff keyword argument
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        diffargs = dict((options[c], b'1') for c in diffopt)

        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **diffargs)
            ui.popbuffer()

        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()
1770 1770
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener') # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # the first 4 bytes of a revlog: low 16 bits carry the format version,
    # the higher bits carry feature flags (such as the inline bit below)
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    # sample nodes at fixed fractions of the revlog so lookups are
    # benchmarked at several depths
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        # cost of instantiating a revlog object
        revlog.revlog(opener, indexfile)

    def read():
        # raw I/O cost of reading the whole index file
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        # cost of turning raw index bytes into an index object
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        # parse the index, then fetch one entry from it
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        # parse once, then fetch every requested entry `count` times
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        # node -> rev lookup through the nodemap
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1888 1888
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        step = opts[b'dist']
        if reverse:
            # walk from tip down to (but excluding) startrev - 1
            first, stop, step = rllen - 1, startrev - 1, -step
        else:
            first, stop = startrev, rllen

        for x in _xrange(first, stop, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(x))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1930 1930
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
         ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revision numbers count back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    # transpose per-run lists into one (rev, [t-run1, t-run2, ...]) list
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
2038 2038
2039 2039 class _faketr(object):
2040 2040 def add(s, x, y, z=None):
2041 2041 return None
2042 2042
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    """Re-add revisions [startrev, stoprev] of `orig` into a temporary
    truncated copy, timing each individual addrawrevision() call.

    Returns a list of (rev, timing) pairs, where timing is the value
    captured by timeone() for that one revision.
    """
    timings = []
    # rollback is never triggered here, so a no-op transaction suffices
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            def updateprogress(pos):
                progress.update(pos)
            def completeprogress():
                progress.complete()
        else:
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            # only the actual revision insertion is inside the timed region
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
2079 2079
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) needed to re-add revision `rev` of `orig`
    through revlog.addrawrevision(), according to the requested `source`:
    either a full text, or a cached delta against a chosen base revision.
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        # feed the revision as a full text; no precomputed delta
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to p1 when there is no second parent
        parent = p2
        if p2 == nullid:
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # pick whichever parent yields the shorter delta
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        # reuse the delta base the revlog chose when the revision was
        # originally stored
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2118 2118
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a writable copy of revlog `orig`, truncated just before
    `truncaterev`, backed by files in a temporary directory.

    The copy can then have revisions >= truncaterev re-added to it; the
    temporary directory is removed when the context exits.  Inline
    revlogs are not supported.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # index entries are fixed-size (orig._io.size bytes each)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            # orig.start() gives the data-file offset of a revision
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
2165 2165
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # default to every available engine that can actually compress
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # inline revlogs store their data inside the index file
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        # one segment read per revision, opening the file each time
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # same as doread, but reusing a single file handle
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # one large segment read spanning all revisions
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        # read + decompress each revision's chunk individually
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        # recompress the chunks captured by dochunkbatch with `compressor`
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    # dochunkbatch must run before these to populate chunks[0]; it does,
    # because benches are timed in list order
    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2283 2283
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # with -c/-m the positional argument is the revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # slice each read segment into the per-revision compressed chunks
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # skip the index entries interleaved with the data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        # phase 1: walk the delta chain
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        # phase 3 (I/O only): read the raw segments for each slice
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        # phase 2: slice the chain (sparse-read only)
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        # phase 3 (cutting): split segments into per-revision chunks
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        # phase 4: decompress each chunk
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        # phase 5: apply the binary deltas to the base text
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        # phase 6: verify the fulltext hash
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        # all phases combined, via the public API
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute the inputs each phase needs so only that phase is timed
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2419 2419
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option to evaluate the impact of rebuilding the
    volatile revision-set caches (filtered and obsolescence related data)
    on revset execution."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            # drop filtering/obsolescence caches so they get recomputed
            repo.invalidatevolatilesets()
        # iterate fully to force evaluation of the lazy result
        if contexts:
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
2442 2442
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def makebench(func, name):
        # each run starts from cold volatile-set caches
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            func(repo, name)
        return d

    def select(candidates):
        # sorted candidate names, restricted to those requested on the CLI
        picked = sorted(candidates)
        if names:
            picked = [n for n in picked if n in names]
        return picked

    # obsolescence-related sets first, then repoview filters
    for name in select(obsolete.cachefuncs):
        timer(makebench(obsolete.getrevs, name), title=name)

    for name in select(repoview.filtertable):
        timer(makebench(repoview.filterrevs, name), title=name)
    fm.end()
2484 2484
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
          ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap, so subsets are rebuilt too
                view._branchcaches.clear()
            else:
                # drop only this filter's cache; its subsets stay warm
                filtered.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose own subset is not still pending, so every
        # subset is appended before the filters built on top of it
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap reading and writing so that only the
    # in-memory computation is measured
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2563 2563
@command(b'perfbranchmapupdate', [
     (b'', b'base', [], b'subset of revision to start from'),
     (b'', b'target', [], b'subset of revision to end with'),
     (b'', b'clear-caches', False, b'clear cache between each runs')
] + formatteropts)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None] # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the revisions the timed update will have to process
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # temporarily register ad-hoc repoview filters matching the two
        # revision subsets
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset where found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # each run starts from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2667 2667
@command(b'perfbranchmapload', [
     (b'f', b'filter', b'', b'Specify repoview filter'),
     (b'', b'list', False, b'List brachmap filter caches'),
     (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),

] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    # --list: just report which branchmap cache files exist, and exit
    if list:
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap() # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    # (fall back through the subset chain until a cached branchmap loads)
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        fromfile(repo)
    timer(bench, setup=setup)
    fm.end()
2718 2718
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    storevfs = getsvfs(repo)

    def countmarkers():
        # parsing happens in the obsstore constructor; len() reports count
        return len(obsolete.obsstore(storevfs))

    timer(countmarkers)
    fm.end()
2728 2728
@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
     (b'', b'mincost', 0, b'smallest cost of items in cache'),
     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
     (b'', b'size', 4, b'size of cache'),
     (b'', b'gets', 10000, b'number of key lookups'),
     (b'', b'sets', 10000, b'number of key sets'),
     (b'', b'mixed', 10000, b'number of mixed mode operations'),
     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    """benchmark util.lrucachedict operations.

    Pre-generates random key/cost sequences, then times each workload
    (init, gets, inserts, sets, mixed).  When ``--costlimit`` is non-zero
    the cost-aware variants are benchmarked instead of the plain ones.
    """
    opts = _byteskwargs(opts)

    def doinit():
        # construction cost only; the dict is discarded immediately
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    # one random key per cache slot; these fill the cache in get mode
    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        # NOTE(review): closes over ``costs`` (defined below, populated
        # before any benchmark runs) and indexes costs[i] for each of the
        # ``size`` values; costs has ``sets`` entries, so size > sets would
        # raise IndexError here — confirm that is never an intended input.
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                # entries may have been evicted to respect the cost limit
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        # __setitem__ path, as opposed to the explicit insert() API above
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        # op 0 = get, op 1 = set, chosen with mixedgetfreq% probability
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        # keys span twice the cache size so misses and evictions occur
        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    # each workload gets its own timer/formatter pair in the report
    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2859 2859
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    # hoist the constant line so the loop measures ui.write() alone
    line = b'Testing write performance\n'

    def bench():
        for _i in range(100000):
            ui.write(line)

    timer(bench)
    fm.end()
2872 2872
def uisetup(ui):
    """Extension setup hook.

    On Mercurial 1.9 - 3.7 (cmdutil.openrevlog exists but
    commands.debugrevlogopts does not), openrevlog() lacks '--dir'
    support, so wrap it to fail with a clear message instead.
    """
    if not (util.safehasattr(cmdutil, b'openrevlog')
            and not util.safehasattr(commands, b'debugrevlogopts')):
        return
    # for "historical portability":
    # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
    # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
    # openrevlog() should cause failure, because it has been
    # available since 3.5 (or 49c583ca48c4).
    def openrevlog(orig, repo, cmd, file_, opts):
        if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
            raise error.Abort(b"This version doesn't support --dir option",
                              hint=b"use 3.5 or later")
        return orig(repo, cmd, file_, opts)
    extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2887 2887
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def bench():
        # drive one complete progress bar, one increment per step
        with ui.makeprogress(topic, total=total) as progress:
            for _step in pycompat.xrange(total):
                progress.increment()

    timer(bench)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now