##// END OF EJS Templates
perf: don't depend on pycompat for older Mercurial versions...
Martin von Zweigbergk -
r43053:c0000597 default
parent child Browse files
Show More
@@ -1,3092 +1,3094 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
11 12 12 worst, median, average. If not set, only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
15 16 16 number of seconds to wait before any group of runs (default: 1)
17 17
18 18 ``pre-run``
18 19 19 number of runs to perform before starting measurement.
20 20
21 21 ``profile-benchmark``
22 22 Enable profiling for the benchmarked section.
23 23 (The first iteration is benchmarked)
24 24
25 25 ``run-limits``
26 26 Control the number of runs each benchmark will perform. The option value
27 27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 28 conditions are considered in order with the following logic:
29 29
30 30 If benchmark has been running for <time> seconds, and we have performed
31 31 <numberofrun> iterations, stop the benchmark,
32 32
33 33 The default value is: `3.0-100, 10.0-3`
34 34
35 35 ``stub``
36 36 When set, benchmarks will only be run once, useful for testing
37 37 (default: off)
38 38 '''
39 39
40 40 # "historical portability" policy of perf.py:
41 41 #
42 42 # We have to do:
43 43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 46 # - make historical perf command work correctly with as wide Mercurial
47 47 # version as possible
48 48 #
49 49 # We have to do, if possible with reasonable cost:
50 50 # - make recent perf command for historical feature work correctly
51 51 # with early Mercurial
52 52 #
53 53 # We don't have to do:
54 54 # - make perf command for recent feature work correctly with early
55 55 # Mercurial
56 56
57 57 from __future__ import absolute_import
58 58 import contextlib
59 59 import functools
60 60 import gc
61 61 import os
62 62 import random
63 63 import shutil
64 64 import struct
65 65 import sys
66 66 import tempfile
67 67 import threading
68 68 import time
69 69 from mercurial import (
70 70 changegroup,
71 71 cmdutil,
72 72 commands,
73 73 copies,
74 74 error,
75 75 extensions,
76 76 hg,
77 77 mdiff,
78 78 merge,
79 79 revlog,
80 80 util,
81 81 )
82 82
83 83 # for "historical portability":
84 84 # try to import modules separately (in dict order), and ignore
85 85 # failure, because these aren't available with early Mercurial
86 86 try:
87 87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 88 except ImportError:
89 89 pass
90 90 try:
91 91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 92 except ImportError:
93 93 pass
94 94 try:
95 95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 96 dir(registrar) # forcibly load it
97 97 except ImportError:
98 98 registrar = None
99 99 try:
100 100 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 101 except ImportError:
102 102 pass
103 103 try:
104 104 from mercurial.utils import repoviewutil # since 5.0
105 105 except ImportError:
106 106 repoviewutil = None
107 107 try:
108 108 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 109 except ImportError:
110 110 pass
111 111 try:
112 112 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 113 except ImportError:
114 114 pass
115 115
116 116 try:
117 117 from mercurial import profiling
118 118 except ImportError:
119 119 profiling = None
120 120
121 121 def identity(a):
122 122 return a
123 123
124 124 try:
125 125 from mercurial import pycompat
126 126 getargspec = pycompat.getargspec # added to module after 4.5
127 127 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
128 128 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
129 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
129 130 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
130 131 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
131 132 if pycompat.ispy3:
132 133 _maxint = sys.maxsize # per py3 docs for replacing maxint
133 134 else:
134 135 _maxint = sys.maxint
135 136 except (NameError, ImportError, AttributeError):
136 137 import inspect
137 138 getargspec = inspect.getargspec
138 139 _byteskwargs = identity
140 _bytestr = str
139 141 fsencode = identity # no py3 support
140 142 _maxint = sys.maxint # no py3 support
141 143 _sysstr = lambda x: x # no py3 support
142 144 _xrange = xrange
143 145
144 146 try:
145 147 # 4.7+
146 148 queue = pycompat.queue.Queue
147 149 except (NameError, AttributeError, ImportError):
148 150 # <4.7.
149 151 try:
150 152 queue = pycompat.queue
151 153 except (NameError, AttributeError, ImportError):
152 154 import Queue as queue
153 155
154 156 try:
155 157 from mercurial import logcmdutil
156 158 makelogtemplater = logcmdutil.maketemplater
157 159 except (AttributeError, ImportError):
158 160 try:
159 161 makelogtemplater = cmdutil.makelogtemplater
160 162 except (AttributeError, ImportError):
161 163 makelogtemplater = None
162 164
163 165 # for "historical portability":
164 166 # define util.safehasattr forcibly, because util.safehasattr has been
165 167 # available since 1.9.3 (or 94b200a11cf7)
166 168 _undefined = object()
167 169 def safehasattr(thing, attr):
168 170 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
169 171 setattr(util, 'safehasattr', safehasattr)
170 172
171 173 # for "historical portability":
172 174 # define util.timer forcibly, because util.timer has been available
173 175 # since ae5d60bb70c9
174 176 if safehasattr(time, 'perf_counter'):
175 177 util.timer = time.perf_counter
176 178 elif os.name == b'nt':
177 179 util.timer = time.clock
178 180 else:
179 181 util.timer = time.time
180 182
181 183 # for "historical portability":
182 184 # use locally defined empty option list, if formatteropts isn't
183 185 # available, because commands.formatteropts has been available since
184 186 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
185 187 # available since 2.2 (or ae5f92e154d3)
186 188 formatteropts = getattr(cmdutil, "formatteropts",
187 189 getattr(commands, "formatteropts", []))
188 190
189 191 # for "historical portability":
190 192 # use locally defined option list, if debugrevlogopts isn't available,
191 193 # because commands.debugrevlogopts has been available since 3.7 (or
192 194 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
193 195 # since 1.9 (or a79fea6b3e77).
194 196 revlogopts = getattr(cmdutil, "debugrevlogopts",
195 197 getattr(commands, "debugrevlogopts", [
196 198 (b'c', b'changelog', False, (b'open changelog')),
197 199 (b'm', b'manifest', False, (b'open manifest')),
198 200 (b'', b'dir', False, (b'open directory manifest')),
199 201 ]))
200 202
201 203 cmdtable = {}
202 204
203 205 # for "historical portability":
204 206 # define parsealiases locally, because cmdutil.parsealiases has been
205 207 # available since 1.5 (or 6252852b4332)
206 208 def parsealiases(cmd):
207 209 return cmd.split(b"|")
208 210
209 211 if safehasattr(registrar, 'command'):
210 212 command = registrar.command(cmdtable)
211 213 elif safehasattr(cmdutil, 'command'):
212 214 command = cmdutil.command(cmdtable)
213 215 if b'norepo' not in getargspec(command).args:
214 216 # for "historical portability":
215 217 # wrap original cmdutil.command, because "norepo" option has
216 218 # been available since 3.1 (or 75a96326cecb)
217 219 _command = command
218 220 def command(name, options=(), synopsis=None, norepo=False):
219 221 if norepo:
220 222 commands.norepo += b' %s' % b' '.join(parsealiases(name))
221 223 return _command(name, list(options), synopsis)
222 224 else:
223 225 # for "historical portability":
224 226 # define "@command" annotation locally, because cmdutil.command
225 227 # has been available since 1.9 (or 2daa5179e73f)
226 228 def command(name, options=(), synopsis=None, norepo=False):
227 229 def decorator(func):
228 230 if synopsis:
229 231 cmdtable[name] = func, list(options), synopsis
230 232 else:
231 233 cmdtable[name] = func, list(options)
232 234 if norepo:
233 235 commands.norepo += b' %s' % b' '.join(parsealiases(name))
234 236 return func
235 237 return decorator
236 238
237 239 try:
238 240 import mercurial.registrar
239 241 import mercurial.configitems
240 242 configtable = {}
241 243 configitem = mercurial.registrar.configitem(configtable)
242 244 configitem(b'perf', b'presleep',
243 245 default=mercurial.configitems.dynamicdefault,
244 246 experimental=True,
245 247 )
246 248 configitem(b'perf', b'stub',
247 249 default=mercurial.configitems.dynamicdefault,
248 250 experimental=True,
249 251 )
250 252 configitem(b'perf', b'parentscount',
251 253 default=mercurial.configitems.dynamicdefault,
252 254 experimental=True,
253 255 )
254 256 configitem(b'perf', b'all-timing',
255 257 default=mercurial.configitems.dynamicdefault,
256 258 experimental=True,
257 259 )
258 260 configitem(b'perf', b'pre-run',
259 261 default=mercurial.configitems.dynamicdefault,
260 262 )
261 263 configitem(b'perf', b'profile-benchmark',
262 264 default=mercurial.configitems.dynamicdefault,
263 265 )
264 266 configitem(b'perf', b'run-limits',
265 267 default=mercurial.configitems.dynamicdefault,
266 268 experimental=True,
267 269 )
268 270 except (ImportError, AttributeError):
269 271 pass
270 272 except TypeError:
271 273 # compatibility fix for a11fd395e83f
272 274 # hg version: 5.2
273 275 configitem(b'perf', b'presleep',
274 276 default=mercurial.configitems.dynamicdefault,
275 277 )
276 278 configitem(b'perf', b'stub',
277 279 default=mercurial.configitems.dynamicdefault,
278 280 )
279 281 configitem(b'perf', b'parentscount',
280 282 default=mercurial.configitems.dynamicdefault,
281 283 )
282 284 configitem(b'perf', b'all-timing',
283 285 default=mercurial.configitems.dynamicdefault,
284 286 )
285 287 configitem(b'perf', b'pre-run',
286 288 default=mercurial.configitems.dynamicdefault,
287 289 )
288 290 configitem(b'perf', b'profile-benchmark',
289 291 default=mercurial.configitems.dynamicdefault,
290 292 )
291 293 configitem(b'perf', b'run-limits',
292 294 default=mercurial.configitems.dynamicdefault,
293 295 )
294 296
295 297 def getlen(ui):
296 298 if ui.configbool(b"perf", b"stub", False):
297 299 return lambda x: 1
298 300 return len
299 301
300 302 class noop(object):
301 303 """dummy context manager"""
302 304 def __enter__(self):
303 305 pass
304 306 def __exit__(self, *args):
305 307 pass
306 308
307 309 NOOPCTX = noop()
308 310
309 311 def gettimer(ui, opts=None):
310 312 """return a timer function and formatter: (timer, formatter)
311 313
312 314 This function exists to gather the creation of formatter in a single
313 315 place instead of duplicating it in all performance commands."""
314 316
315 317 # enforce an idle period before execution to counteract power management
316 318 # experimental config: perf.presleep
317 319 time.sleep(getint(ui, b"perf", b"presleep", 1))
318 320
319 321 if opts is None:
320 322 opts = {}
321 323 # redirect all to stderr unless buffer api is in use
322 324 if not ui._buffers:
323 325 ui = ui.copy()
324 326 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
325 327 if uifout:
326 328 # for "historical portability":
327 329 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
328 330 uifout.set(ui.ferr)
329 331
330 332 # get a formatter
331 333 uiformatter = getattr(ui, 'formatter', None)
332 334 if uiformatter:
333 335 fm = uiformatter(b'perf', opts)
334 336 else:
335 337 # for "historical portability":
336 338 # define formatter locally, because ui.formatter has been
337 339 # available since 2.2 (or ae5f92e154d3)
338 340 from mercurial import node
339 341 class defaultformatter(object):
340 342 """Minimized composition of baseformatter and plainformatter
341 343 """
342 344 def __init__(self, ui, topic, opts):
343 345 self._ui = ui
344 346 if ui.debugflag:
345 347 self.hexfunc = node.hex
346 348 else:
347 349 self.hexfunc = node.short
348 350 def __nonzero__(self):
349 351 return False
350 352 __bool__ = __nonzero__
351 353 def startitem(self):
352 354 pass
353 355 def data(self, **data):
354 356 pass
355 357 def write(self, fields, deftext, *fielddata, **opts):
356 358 self._ui.write(deftext % fielddata, **opts)
357 359 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
358 360 if cond:
359 361 self._ui.write(deftext % fielddata, **opts)
360 362 def plain(self, text, **opts):
361 363 self._ui.write(text, **opts)
362 364 def end(self):
363 365 pass
364 366 fm = defaultformatter(ui, b'perf', opts)
365 367
366 368 # stub function, runs code only once instead of in a loop
367 369 # experimental config: perf.stub
368 370 if ui.configbool(b"perf", b"stub", False):
369 371 return functools.partial(stub_timer, fm), fm
370 372
371 373 # experimental config: perf.all-timing
372 374 displayall = ui.configbool(b"perf", b"all-timing", False)
373 375
374 376 # experimental config: perf.run-limits
375 377 limitspec = ui.configlist(b"perf", b"run-limits", [])
376 378 limits = []
377 379 for item in limitspec:
378 380 parts = item.split(b'-', 1)
379 381 if len(parts) < 2:
380 382 ui.warn((b'malformatted run limit entry, missing "-": %s\n'
381 383 % item))
382 384 continue
383 385 try:
384 time_limit = float(pycompat.sysstr(parts[0]))
386 time_limit = float(_sysstr(parts[0]))
385 387 except ValueError as e:
386 388 ui.warn((b'malformatted run limit entry, %s: %s\n'
387 % (pycompat.bytestr(e), item)))
389 % (_bytestr(e), item)))
388 390 continue
389 391 try:
390 run_limit = int(pycompat.sysstr(parts[1]))
392 run_limit = int(_sysstr(parts[1]))
391 393 except ValueError as e:
392 394 ui.warn((b'malformatted run limit entry, %s: %s\n'
393 % (pycompat.bytestr(e), item)))
395 % (_bytestr(e), item)))
394 396 continue
395 397 limits.append((time_limit, run_limit))
396 398 if not limits:
397 399 limits = DEFAULTLIMITS
398 400
399 401 profiler = None
400 402 if profiling is not None:
401 403 if ui.configbool(b"perf", b"profile-benchmark", False):
402 404 profiler = profiling.profile(ui)
403 405
404 406 prerun = getint(ui, b"perf", b"pre-run", 0)
405 407 t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
406 408 prerun=prerun, profiler=profiler)
407 409 return t, fm
408 410
409 411 def stub_timer(fm, func, setup=None, title=None):
410 412 if setup is not None:
411 413 setup()
412 414 func()
413 415
414 416 @contextlib.contextmanager
415 417 def timeone():
416 418 r = []
417 419 ostart = os.times()
418 420 cstart = util.timer()
419 421 yield r
420 422 cstop = util.timer()
421 423 ostop = os.times()
422 424 a, b = ostart, ostop
423 425 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
424 426
425 427
426 428 # list of stop condition (elapsed time, minimal run count)
427 429 DEFAULTLIMITS = (
428 430 (3.0, 100),
429 431 (10.0, 3),
430 432 )
431 433
432 434 def _timer(fm, func, setup=None, title=None, displayall=False,
433 435 limits=DEFAULTLIMITS, prerun=0, profiler=None):
434 436 gc.collect()
435 437 results = []
436 438 begin = util.timer()
437 439 count = 0
438 440 if profiler is None:
439 441 profiler = NOOPCTX
440 442 for i in range(prerun):
441 443 if setup is not None:
442 444 setup()
443 445 func()
444 446 keepgoing = True
445 447 while keepgoing:
446 448 if setup is not None:
447 449 setup()
448 450 with profiler:
449 451 with timeone() as item:
450 452 r = func()
451 453 profiler = NOOPCTX
452 454 count += 1
453 455 results.append(item[0])
454 456 cstop = util.timer()
455 457 # Look for a stop condition.
456 458 elapsed = cstop - begin
457 459 for t, mincount in limits:
458 460 if elapsed >= t and count >= mincount:
459 461 keepgoing = False
460 462 break
461 463
462 464 formatone(fm, results, title=title, result=r,
463 465 displayall=displayall)
464 466
465 467 def formatone(fm, timings, title=None, result=None, displayall=False):
466 468
467 469 count = len(timings)
468 470
469 471 fm.startitem()
470 472
471 473 if title:
472 474 fm.write(b'title', b'! %s\n', title)
473 475 if result:
474 476 fm.write(b'result', b'! result: %s\n', result)
475 477 def display(role, entry):
476 478 prefix = b''
477 479 if role != b'best':
478 480 prefix = b'%s.' % role
479 481 fm.plain(b'!')
480 482 fm.write(prefix + b'wall', b' wall %f', entry[0])
481 483 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
482 484 fm.write(prefix + b'user', b' user %f', entry[1])
483 485 fm.write(prefix + b'sys', b' sys %f', entry[2])
484 486 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
485 487 fm.plain(b'\n')
486 488 timings.sort()
487 489 min_val = timings[0]
488 490 display(b'best', min_val)
489 491 if displayall:
490 492 max_val = timings[-1]
491 493 display(b'max', max_val)
492 494 avg = tuple([sum(x) / count for x in zip(*timings)])
493 495 display(b'avg', avg)
494 496 median = timings[len(timings) // 2]
495 497 display(b'median', median)
496 498
497 499 # utilities for historical portability
498 500
499 501 def getint(ui, section, name, default):
500 502 # for "historical portability":
501 503 # ui.configint has been available since 1.9 (or fa2b596db182)
502 504 v = ui.config(section, name, None)
503 505 if v is None:
504 506 return default
505 507 try:
506 508 return int(v)
507 509 except ValueError:
508 510 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
509 511 % (section, name, v))
510 512
511 513 def safeattrsetter(obj, name, ignoremissing=False):
512 514 """Ensure that 'obj' has 'name' attribute before subsequent setattr
513 515
514 516 This function is aborted, if 'obj' doesn't have 'name' attribute
515 517 at runtime. This avoids overlooking removal of an attribute, which
516 518 breaks assumption of performance measurement, in the future.
517 519
518 520 This function returns the object to (1) assign a new value, and
519 521 (2) restore an original value to the attribute.
520 522
521 523 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
522 524 abortion, and this function returns None. This is useful to
523 525 examine an attribute, which isn't ensured in all Mercurial
524 526 versions.
525 527 """
526 528 if not util.safehasattr(obj, name):
527 529 if ignoremissing:
528 530 return None
529 531 raise error.Abort((b"missing attribute %s of %s might break assumption"
530 532 b" of performance measurement") % (name, obj))
531 533
532 534 origvalue = getattr(obj, _sysstr(name))
533 535 class attrutil(object):
534 536 def set(self, newvalue):
535 537 setattr(obj, _sysstr(name), newvalue)
536 538 def restore(self):
537 539 setattr(obj, _sysstr(name), origvalue)
538 540
539 541 return attrutil()
540 542
541 543 # utilities to examine each internal API changes
542 544
543 545 def getbranchmapsubsettable():
544 546 # for "historical portability":
545 547 # subsettable is defined in:
546 548 # - branchmap since 2.9 (or 175c6fd8cacc)
547 549 # - repoview since 2.5 (or 59a9f18d4587)
548 550 # - repoviewutil since 5.0
549 551 for mod in (branchmap, repoview, repoviewutil):
550 552 subsettable = getattr(mod, 'subsettable', None)
551 553 if subsettable:
552 554 return subsettable
553 555
554 556 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
555 557 # branchmap and repoview modules exist, but subsettable attribute
556 558 # doesn't)
557 559 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
558 560 hint=b"use 2.5 or later")
559 561
560 562 def getsvfs(repo):
561 563 """Return appropriate object to access files under .hg/store
562 564 """
563 565 # for "historical portability":
564 566 # repo.svfs has been available since 2.3 (or 7034365089bf)
565 567 svfs = getattr(repo, 'svfs', None)
566 568 if svfs:
567 569 return svfs
568 570 else:
569 571 return getattr(repo, 'sopener')
570 572
571 573 def getvfs(repo):
572 574 """Return appropriate object to access files under .hg
573 575 """
574 576 # for "historical portability":
575 577 # repo.vfs has been available since 2.3 (or 7034365089bf)
576 578 vfs = getattr(repo, 'vfs', None)
577 579 if vfs:
578 580 return vfs
579 581 else:
580 582 return getattr(repo, 'opener')
581 583
582 584 def repocleartagscachefunc(repo):
583 585 """Return the function to clear tags cache according to repo internal API
584 586 """
585 587 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
586 588 # in this case, setattr(repo, '_tagscache', None) or so isn't
587 589 # correct way to clear tags cache, because existing code paths
588 590 # expect _tagscache to be a structured object.
589 591 def clearcache():
590 592 # _tagscache has been filteredpropertycache since 2.5 (or
591 593 # 98c867ac1330), and delattr() can't work in such case
592 594 if b'_tagscache' in vars(repo):
593 595 del repo.__dict__[b'_tagscache']
594 596 return clearcache
595 597
596 598 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
597 599 if repotags: # since 1.4 (or 5614a628d173)
598 600 return lambda : repotags.set(None)
599 601
600 602 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
601 603 if repotagscache: # since 0.6 (or d7df759d0e97)
602 604 return lambda : repotagscache.set(None)
603 605
604 606 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
605 607 # this point, but it isn't so problematic, because:
606 608 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
607 609 # in perftags() causes failure soon
608 610 # - perf.py itself has been available since 1.1 (or eb240755386d)
609 611 raise error.Abort((b"tags API of this hg command is unknown"))
610 612
611 613 # utilities to clear cache
612 614
613 615 def clearfilecache(obj, attrname):
614 616 unfiltered = getattr(obj, 'unfiltered', None)
615 617 if unfiltered is not None:
616 618 obj = obj.unfiltered()
617 619 if attrname in vars(obj):
618 620 delattr(obj, attrname)
619 621 obj._filecache.pop(attrname, None)
620 622
621 623 def clearchangelog(repo):
622 624 if repo is not repo.unfiltered():
623 625 object.__setattr__(repo, r'_clcachekey', None)
624 626 object.__setattr__(repo, r'_clcache', None)
625 627 clearfilecache(repo.unfiltered(), 'changelog')
626 628
627 629 # perf commands
628 630
629 631 @command(b'perfwalk', formatteropts)
630 632 def perfwalk(ui, repo, *pats, **opts):
631 633 opts = _byteskwargs(opts)
632 634 timer, fm = gettimer(ui, opts)
633 635 m = scmutil.match(repo[None], pats, {})
634 636 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
635 637 ignored=False))))
636 638 fm.end()
637 639
638 640 @command(b'perfannotate', formatteropts)
639 641 def perfannotate(ui, repo, f, **opts):
640 642 opts = _byteskwargs(opts)
641 643 timer, fm = gettimer(ui, opts)
642 644 fc = repo[b'.'][f]
643 645 timer(lambda: len(fc.annotate(True)))
644 646 fm.end()
645 647
646 648 @command(b'perfstatus',
647 649 [(b'u', b'unknown', False,
648 650 b'ask status to look for unknown files')] + formatteropts)
649 651 def perfstatus(ui, repo, **opts):
650 652 opts = _byteskwargs(opts)
651 653 #m = match.always(repo.root, repo.getcwd())
652 654 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
653 655 # False))))
654 656 timer, fm = gettimer(ui, opts)
655 657 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
656 658 fm.end()
657 659
658 660 @command(b'perfaddremove', formatteropts)
659 661 def perfaddremove(ui, repo, **opts):
660 662 opts = _byteskwargs(opts)
661 663 timer, fm = gettimer(ui, opts)
662 664 try:
663 665 oldquiet = repo.ui.quiet
664 666 repo.ui.quiet = True
665 667 matcher = scmutil.match(repo[None])
666 668 opts[b'dry_run'] = True
667 669 if b'uipathfn' in getargspec(scmutil.addremove).args:
668 670 uipathfn = scmutil.getuipathfn(repo)
669 671 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
670 672 else:
671 673 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
672 674 finally:
673 675 repo.ui.quiet = oldquiet
674 676 fm.end()
675 677
676 678 def clearcaches(cl):
677 679 # behave somewhat consistently across internal API changes
678 680 if util.safehasattr(cl, b'clearcaches'):
679 681 cl.clearcaches()
680 682 elif util.safehasattr(cl, b'_nodecache'):
681 683 from mercurial.node import nullid, nullrev
682 684 cl._nodecache = {nullid: nullrev}
683 685 cl._nodepos = None
684 686
685 687 @command(b'perfheads', formatteropts)
686 688 def perfheads(ui, repo, **opts):
687 689 """benchmark the computation of a changelog heads"""
688 690 opts = _byteskwargs(opts)
689 691 timer, fm = gettimer(ui, opts)
690 692 cl = repo.changelog
691 693 def s():
692 694 clearcaches(cl)
693 695 def d():
694 696 len(cl.headrevs())
695 697 timer(d, setup=s)
696 698 fm.end()
697 699
698 700 @command(b'perftags', formatteropts+
699 701 [
700 702 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
701 703 ])
702 704 def perftags(ui, repo, **opts):
703 705 opts = _byteskwargs(opts)
704 706 timer, fm = gettimer(ui, opts)
705 707 repocleartagscache = repocleartagscachefunc(repo)
706 708 clearrevlogs = opts[b'clear_revlogs']
707 709 def s():
708 710 if clearrevlogs:
709 711 clearchangelog(repo)
710 712 clearfilecache(repo.unfiltered(), 'manifest')
711 713 repocleartagscache()
712 714 def t():
713 715 return len(repo.tags())
714 716 timer(t, setup=s)
715 717 fm.end()
716 718
717 719 @command(b'perfancestors', formatteropts)
718 720 def perfancestors(ui, repo, **opts):
719 721 opts = _byteskwargs(opts)
720 722 timer, fm = gettimer(ui, opts)
721 723 heads = repo.changelog.headrevs()
722 724 def d():
723 725 for a in repo.changelog.ancestors(heads):
724 726 pass
725 727 timer(d)
726 728 fm.end()
727 729
728 730 @command(b'perfancestorset', formatteropts)
729 731 def perfancestorset(ui, repo, revset, **opts):
730 732 opts = _byteskwargs(opts)
731 733 timer, fm = gettimer(ui, opts)
732 734 revs = repo.revs(revset)
733 735 heads = repo.changelog.headrevs()
734 736 def d():
735 737 s = repo.changelog.ancestors(heads)
736 738 for rev in revs:
737 739 rev in s
738 740 timer(d)
739 741 fm.end()
740 742
741 743 @command(b'perfdiscovery', formatteropts, b'PATH')
742 744 def perfdiscovery(ui, repo, path, **opts):
743 745 """benchmark discovery between local repo and the peer at given path
744 746 """
745 747 repos = [repo, None]
746 748 timer, fm = gettimer(ui, opts)
747 749 path = ui.expandpath(path)
748 750
749 751 def s():
750 752 repos[1] = hg.peer(ui, opts, path)
751 753 def d():
752 754 setdiscovery.findcommonheads(ui, *repos)
753 755 timer(d, setup=s)
754 756 fm.end()
755 757
756 758 @command(b'perfbookmarks', formatteropts +
757 759 [
758 760 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
759 761 ])
760 762 def perfbookmarks(ui, repo, **opts):
761 763 """benchmark parsing bookmarks from disk to memory"""
762 764 opts = _byteskwargs(opts)
763 765 timer, fm = gettimer(ui, opts)
764 766
765 767 clearrevlogs = opts[b'clear_revlogs']
766 768 def s():
767 769 if clearrevlogs:
768 770 clearchangelog(repo)
769 771 clearfilecache(repo, b'_bookmarks')
770 772 def d():
771 773 repo._bookmarks
772 774 timer(d, setup=s)
773 775 fm.end()
774 776
775 777 @command(b'perfbundleread', formatteropts, b'BUNDLE')
776 778 def perfbundleread(ui, repo, bundlepath, **opts):
777 779 """Benchmark reading of bundle files.
778 780
779 781 This command is meant to isolate the I/O part of bundle reading as
780 782 much as possible.
781 783 """
782 784 from mercurial import (
783 785 bundle2,
784 786 exchange,
785 787 streamclone,
786 788 )
787 789
788 790 opts = _byteskwargs(opts)
789 791
790 792 def makebench(fn):
791 793 def run():
792 794 with open(bundlepath, b'rb') as fh:
793 795 bundle = exchange.readbundle(ui, fh, bundlepath)
794 796 fn(bundle)
795 797
796 798 return run
797 799
798 800 def makereadnbytes(size):
799 801 def run():
800 802 with open(bundlepath, b'rb') as fh:
801 803 bundle = exchange.readbundle(ui, fh, bundlepath)
802 804 while bundle.read(size):
803 805 pass
804 806
805 807 return run
806 808
807 809 def makestdioread(size):
808 810 def run():
809 811 with open(bundlepath, b'rb') as fh:
810 812 while fh.read(size):
811 813 pass
812 814
813 815 return run
814 816
815 817 # bundle1
816 818
817 819 def deltaiter(bundle):
818 820 for delta in bundle.deltaiter():
819 821 pass
820 822
821 823 def iterchunks(bundle):
822 824 for chunk in bundle.getchunks():
823 825 pass
824 826
825 827 # bundle2
826 828
827 829 def forwardchunks(bundle):
828 830 for chunk in bundle._forwardchunks():
829 831 pass
830 832
831 833 def iterparts(bundle):
832 834 for part in bundle.iterparts():
833 835 pass
834 836
835 837 def iterpartsseekable(bundle):
836 838 for part in bundle.iterparts(seekable=True):
837 839 pass
838 840
839 841 def seek(bundle):
840 842 for part in bundle.iterparts(seekable=True):
841 843 part.seek(0, os.SEEK_END)
842 844
843 845 def makepartreadnbytes(size):
844 846 def run():
845 847 with open(bundlepath, b'rb') as fh:
846 848 bundle = exchange.readbundle(ui, fh, bundlepath)
847 849 for part in bundle.iterparts():
848 850 while part.read(size):
849 851 pass
850 852
851 853 return run
852 854
853 855 benches = [
854 856 (makestdioread(8192), b'read(8k)'),
855 857 (makestdioread(16384), b'read(16k)'),
856 858 (makestdioread(32768), b'read(32k)'),
857 859 (makestdioread(131072), b'read(128k)'),
858 860 ]
859 861
860 862 with open(bundlepath, b'rb') as fh:
861 863 bundle = exchange.readbundle(ui, fh, bundlepath)
862 864
863 865 if isinstance(bundle, changegroup.cg1unpacker):
864 866 benches.extend([
865 867 (makebench(deltaiter), b'cg1 deltaiter()'),
866 868 (makebench(iterchunks), b'cg1 getchunks()'),
867 869 (makereadnbytes(8192), b'cg1 read(8k)'),
868 870 (makereadnbytes(16384), b'cg1 read(16k)'),
869 871 (makereadnbytes(32768), b'cg1 read(32k)'),
870 872 (makereadnbytes(131072), b'cg1 read(128k)'),
871 873 ])
872 874 elif isinstance(bundle, bundle2.unbundle20):
873 875 benches.extend([
874 876 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
875 877 (makebench(iterparts), b'bundle2 iterparts()'),
876 878 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
877 879 (makebench(seek), b'bundle2 part seek()'),
878 880 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
879 881 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
880 882 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
881 883 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
882 884 ])
883 885 elif isinstance(bundle, streamclone.streamcloneapplier):
884 886 raise error.Abort(b'stream clone bundles not supported')
885 887 else:
886 888 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
887 889
888 890 for fn, title in benches:
889 891 timer, fm = gettimer(ui, opts)
890 892 timer(fn, title=title)
891 893 fm.end()
892 894
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    # resolve the revset once, outside the timed section
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # drain the chunk generator; generating the chunks is the work
        # being measured
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
923 925
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark computing the dirstate's directory map (hasdir lookup)"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # force the dirstate to be loaded before timing starts
    b'a' in dirstate
    def d():
        dirstate.hasdir(b'a')
        # drop the cached directory map so each run rebuilds it
        del dirstate._map._dirs
    timer(d)
    fm.end()
935 937
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark loading the dirstate from disk and doing one lookup"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # warm any lazily-initialized state before the timed runs
    b"a" in repo.dirstate
    def d():
        # invalidate so each run re-reads the dirstate file
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(d)
    fm.end()
946 948
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark rebuilding the dirstate directory map on each hasdir call"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # make sure the dirstate itself is loaded before timing
    b"a" in repo.dirstate
    def d():
        repo.dirstate.hasdir(b"a")
        # drop the cached directory map so the next run recomputes it
        del repo.dirstate._map._dirs
    timer(d)
    fm.end()
957 959
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark building the dirstate file-name case-folding map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # force the dirstate to be loaded before timing starts
    b'a' in dirstate
    def d():
        dirstate._map.filefoldmap.get(b'a')
        # drop the cached fold map so each run rebuilds it
        del dirstate._map.filefoldmap
    timer(d)
    fm.end()
969 971
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark building the dirstate directory case-folding map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # force the dirstate to be loaded before timing starts
    b'a' in dirstate
    def d():
        dirstate._map.dirfoldmap.get(b'a')
        # drop both the fold map and the directory map it is derived from,
        # so each run does the full computation
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(d)
    fm.end()
982 984
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark writing the dirstate back to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # force the dirstate to be loaded before timing starts
    b"a" in ds
    def d():
        # mark dirty so write() actually serializes instead of no-op'ing
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()
994 996
def _getmergerevs(repo, opts):
    """parse command arguments to return the revisions involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        # no explicit base: use the common ancestor of the two sides
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1016 1018
@command(b'perfmergecalculate',
         [
             (b'r', b'rev', b'.', b'rev to merge against'),
             (b'', b'from', b'', b'rev to merge from'),
             (b'', b'base', b'', b'the revision to use as base'),
         ] + formatteropts)
def perfmergecalculate(ui, repo, **opts):
    """benchmark `merge.calculateupdates` between two revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()
1035 1037
@command(b'perfmergecopies',
         [
             (b'r', b'rev', b'.', b'rev to merge against'),
             (b'', b'from', b'', b'rev to merge from'),
             (b'', b'base', b'', b'the revision to use as base'),
         ] + formatteropts)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)
    timer(d)
    fm.end()
1053 1055
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve both endpoints once, outside the timed section
    srcctx = scmutil.revsingle(repo, rev1, rev1)
    dstctx = scmutil.revsingle(repo, rev2, rev2)

    def runpathcopies():
        copies.pathcopies(srcctx, dstctx)

    timer(runpathcopies)
    fm.end()
1065 1067
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
          ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def d():
        phases = _phases
        if full:
            # also measure re-reading the phaseroots file from disk
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()
1084 1086
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    # local imports so the extension stays loadable on Mercurial versions
    # that may lack these modules
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    # count how many of the advertised non-public roots we actually know
    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    # NOTE(review): .iteritems() is Python 2 only; this breaks if
    # remotephases is a plain dict on Python 3 — verify against py3 runs
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
1140 1142
@command(b'perfmanifest',[
            (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
            (b'', b'clear-disk', False, b'clear on-disk caches too'),
         ] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # treat REV as a changeset and use its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hex node of a manifest revision
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # getstorage() appeared in newer Mercurial; fall back to the
                # private _revlog attribute on older versions
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        # drop caches so each run really reads from storage
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
1176 1178
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading a single changeset entry from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()
    def d():
        repo.changelog.read(n)
        #repo.changelog._cache = None
    timer(d)
    fm.end()
1187 1189
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # drop any cached ignore matcher so the timed run rebuilds it
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        # attribute access triggers parsing of the hgignore files
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1204 1206
@command(b'perfindex', [
            (b'', b'rev', [], b'revision to be looked up (default tip)'),
            (b'', b'no-lookup', None, b'do not revision lookup post creation'),
         ] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # fix: _byteskwargs() converted all option keys to bytes, so the
        # str key opts['rev'] raised KeyError on Python 3; use b'rev' and a
        # bytes Abort message like the rest of this file
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        # rebuild the changelog (index creation), then look each node up
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)
    timer(d, setup=setup)
    fm.end()
1258 1260
@command(b'perfnodemap', [
            (b'', b'rev', [], b'revision to be looked up (default tip)'),
            (b'', b'clear-caches', True, b'clear revlog cache between calls'),
         ] + formatteropts)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # fix: _byteskwargs() converted all option keys to bytes, so the str key
    # opts['clear_caches'] raised KeyError on Python 3; likewise the Abort
    # message below is now bytes, matching the rest of this file
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]
    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        def setup():
            setnodeget()
    else:
        setnodeget()
        d() # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1317 1319
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark how long a fresh `hg version` invocation takes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        # spawn the same hg executable with a clean HGRCPATH; output is
        # discarded so only process startup is measured
        if os.name != r'nt':
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
        else:
            # Windows: os.system does not accept the inline VAR= form
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
    timer(d)
    fm.end()
1331 1333
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    # resolve the node list once, outside the timed section
    nl = [repo.changelog.node(i) for i in _xrange(count)]
    def d():
        for n in nl:
            repo.changelog.parents(n)
    timer(d)
    fm.end()
1355 1357
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark reading the file list of one changeset"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)

    def readfiles():
        len(repo[rev].files())

    timer(readfiles)
    fm.end()
1365 1367
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw file list straight from the changelog"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        # changelog.read() returns a tuple; index 3 is the files list
        len(cl.read(x)[3])
    timer(d)
    fm.end()
1376 1378
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def dolookup():
        len(repo.lookup(rev))

    timer(dolookup)
    fm.end()
1383 1385
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
         ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark applying a sequence of random edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    # generate a deterministic random edit script up front (seeded), so the
    # timed section only measures linelog.replacelines()
    maxb1 = 100000
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        # a1..a2 is the replaced range, b1..b2 the replacement range
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1417 1419
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving revset specs into a revision range"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def resolve():
        len(scmutil.revrange(repo, specs))

    timer(resolve)
    fm.end()
1425 1427
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark a single node -> rev lookup on a freshly built revlog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    # open the changelog revlog directly, bypassing repo-level caching
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(n)
        # drop revlog caches so every run starts cold
        clearcaches(cl)
    timer(d)
    fm.end()
1439 1441
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark a full `hg log` run (output is buffered and discarded)"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev=[]
    timer, fm = gettimer(ui, opts)
    # buffer ui output so terminal printing does not pollute the timing
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
                               copies=opts.get(b'rename')))
    ui.popbuffer()
    fm.end()
1453 1455
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        # iterate from tip down to revision 0
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            ctx.branch() # read changelog data (in addition to the index)
    timer(moonwalk)
    fm.end()
1468 1470
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    # makelogtemplater is None when this perf.py runs on a Mercurial too old
    # to provide it (see the compatibility shims at the top of the file)
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render into /dev/null so output cost is I/O-free and pager-free
    # NOTE(review): the devnull handle is never closed; harmless for a
    # one-shot benchmark command but worth confirming
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1502 1504
@command(b'perfhelper-mergecopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
         ])
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    # (header, %-format) pairs; timing/rename columns are stripped below
    # when --timing is not requested
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
        ]
    if not dotiming:
        output_template = [i for i in output_template
                           if not ('time' in i[0] or 'renames' in i[0])]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # every merge within the selected revisions is a candidate
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                # fix: take the end timestamp *before* computing the delta;
                # previously p2.time was computed from the stale `end` left
                # over from the p1 measurement
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
1607 1609
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
         ])
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    # with --timing, two extra columns (nb-renames, time) are emitted
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # every merge within the selected revisions is a candidate
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
1683 1685
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def buildauditor():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(buildauditor)
    fm.end()
1690 1692
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    def d():
        s.fncache._load()
    timer(d)
    fm.end()
1700 1702
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache file back to the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    # writing requires the repo lock and an open transaction
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    # back the file up so the benchmark leaves the repo intact
    tr.addbackup(b'fncache')
    def d():
        # mark dirty so write() actually serializes instead of no-op'ing
        s.fncache._dirty = True
        s.fncache.write(tr)
    timer(d)
    tr.close()
    lock.release()
    fm.end()
1717 1719
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark store-path encoding of every fncache entry"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    # load outside the timed section; only encoding is measured
    s.fncache._load()
    def d():
        for p in s.fncache.entries:
            s.encode(p)
    timer(d)
    fm.end()
1729 1731
def _bdiffworker(q, blocks, xdiff, ready, done):
    """worker-thread loop for perfbdiff --threads

    Pulls (text1, text2) pairs from queue ``q`` and diffs each one; a None
    item marks the end of a batch. ``blocks``/``xdiff`` select the diff
    flavor. After each batch the worker parks on the ``ready`` condition
    until woken for the next run; ``done`` ends the loop.
    """
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()
1745 1747
def _manifestrevision(repo, mnode):
    """return the raw stored revision text for manifest node ``mnode``

    Works across Mercurial versions: uses manifestlog.getstorage() when
    available, otherwise falls back to the older private _revlog attribute.
    """
    ml = repo.manifestlog

    if util.safehasattr(ml, b'getstorage'):
        store = ml.getstorage(b'')
    else:
        store = ml._revlog

    return store.revision(mnode)
1755 1757
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    # With --changelog/--manifest there is no FILE argument, so the first
    # positional argument is actually the revision.
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # Collect all (old, new) text pairs up front so the timed function
    # measures only the diffing itself, not revision extraction.
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        # Warm-up handshake: prime the queue with one None per worker so
        # that q.join() returns once every worker thread has started and
        # is parked on the Condition, keeping startup cost out of the
        # timed function.
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            # Feed the pairs, then a None sentinel per worker; workers
            # re-park on the Condition after draining the queue.
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # Shut the pool down: set the done flag, hand each worker a final
        # sentinel and wake them all up so they can exit.
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1856 1858
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    # With --changelog/--manifest the first positional argument is the
    # revision, not a file.
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    pairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # Gather (left, right) text pairs first so only the diffing is timed.
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if not opts[b'alldata']:
            # Diff each revision against its delta parent.
            deltaparent = r.deltaparent(rev)
            pairs.append((r.revision(deltaparent), r.revision(rev)))
            continue

        # Treat the revision as a changeset: first the manifest diffs...
        ctx = repo[rev]
        mtext = _manifestrevision(repo, ctx.manifestnode())
        for pctx in ctx.parents():
            pairs.append((_manifestrevision(repo, pctx.manifestnode()), mtext))

        # ...then every filelog revision touched by the changeset, found
        # by walking the manifest delta against the first parent.
        man = ctx.manifest()
        pman = ctx.p1().manifest()
        for filename, change in pman.diff(man).items():
            fctx = repo.file(filename)
            oldtext = fctx.revision(change[0][0] or -1)
            newtext = fctx.revision(change[1][0] or -1)
            pairs.append((oldtext, newtext))

    def d():
        for left, right in pairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1922 1924
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Map each single-letter diff flag to the keyword argument it enables.
    flagnames = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    for letters in ('', 'w', 'b', 'B', 'wB'):
        diffopts = dict((flagnames[letter], b'1') for letter in letters)
        def d():
            # Buffer output so printing doesn't dominate the measurement.
            ui.pushbuffer()
            commands.diff(ui, repo, **diffopts)
            ui.popbuffer()
        encoded = letters.encode('ascii')
        if encoded:
            title = b'diffopts: -' + encoded
        else:
            title = b'diffopts: none'
        timer(d, title=title)
    fm.end()
1944 1946
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    # Read the raw index bytes once; the closures below reparse them on
    # every call so that parsing cost is part of what gets measured.
    data = opener.read(indexfile)

    # First 4 bytes: flags (high 16 bits) + revlog version (low 16 bits).
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    # Sample nodes at fixed positions to measure lookup cost across the
    # index (start, quartiles, tip).
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        # Full revlog instantiation (index read + parse + object setup).
        revlog.revlog(opener, indexfile)

    def read():
        # Raw I/O only: read the index file without parsing it.
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        # Parse the already-read bytes into an index object.
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2062 2064
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # Negative start revisions count back from the end of the revlog.
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        step = opts[b'dist']
        if not reverse:
            first, last = startrev, rllen
        else:
            # Walk from the tip back down to startrev (inclusive).
            first, last = rllen - 1, startrev - 1
            step = -step

        for x in _xrange(first, last, step):
            # Old revisions don't support passing an int to revision(),
            # so resolve the node first.
            rl.revision(rl.node(x))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2104 2106
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
          ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # Negative boundaries count back from the end of the revlog.
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # fix: message previously had a typo ("invalide run count")
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    # allresults holds one [(rev, timing), ...] list per pass; transpose it
    # into [(rev, [timing-pass-1, timing-pass-2, ...]), ...].
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # BUG FIX: this used to be computed with "* 70 // 100", which
        # reported the 70th percentile under the "50%" label.
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
2222 2224
2223 2225 class _faketr(object):
2224 2226 def add(s, x, y, z=None):
2225 2227 return None
2226 2228
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    """Replay revisions [startrev, stoprev] of ``orig`` into a temporary
    on-disk copy, timing each addrawrevision() call.

    ``source`` selects how revision data is fed (see perfrevlogwrite).
    Returns a list of (rev, timing) pairs, one per replayed revision.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            def updateprogress(pos):
                progress.update(pos)
            def completeprogress():
                progress.complete()
        else:
            # Legacy API: report progress through ui.progress() directly.
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            # Seed computation happens outside the timed region so only
            # the actual insertion is measured.
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
2263 2265
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) for addrawrevision() that replay ``rev``
    of revlog ``orig``.

    ``source`` selects how the revision data is provided: as a full text,
    as a delta against a parent (first, second, or smallest), or as the
    delta actually stored in the revlog.
    """
    from mercurial.node import nullid

    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    linkrev = orig.linkrev(rev)
    flags = orig.flags(rev)
    text = None
    cachedelta = None

    if source == b'full':
        # Provide the resolved text; the revlog computes its own delta.
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # Fall back to the first parent when there is no second one.
        parent = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(parent), orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        parent = p1
        diff = orig.revdiff(p1, rev)
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(diff) > len(p2diff):
                parent = p2
                diff = p2diff
        cachedelta = (orig.rev(parent), diff)
    elif source == b'storage':
        # Reuse the delta exactly as stored on disk.
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2302 2304
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Context manager yielding a temporary on-disk copy of revlog ``orig``
    truncated to ``truncaterev``, so revisions >= truncaterev can be
    re-added to it. The temporary directory is removed on exit.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    # Newer revlogs take an extra constructor argument; forward it if the
    # original has it.
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # Each index entry is _io.size bytes, so this keeps exactly
            # the entries for revisions < truncaterev.
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            # orig.start() gives the data-file offset of truncaterev.
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname, **revlogkwargs)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
2353 2355
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # No explicit selection: benchmark every engine that is available
        # and actually able to compress (probe with a dummy payload).
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # Return a fresh file handle on the file holding the chunk data
        # (the index file for inline revlogs, the data file otherwise).
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # Same as doread() but reusing one file descriptor for all reads.
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # One segment read spanning all revisions at once.
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        # Read + decompress, one revision at a time.
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    # NOTE: the compress benchmarks rely on dochunkbatch having populated
    # chunks[0] in an earlier bench run.
    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2471 2473
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # With --changelog/--manifest the first positional argument is the
    # revision, not a file.
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # Slice the raw segment data into the per-revision compressed
        # chunks, without decompressing them.
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # Inline revlogs interleave index entries with data.
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved between modules across versions.
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # Precompute each phase's input so every benchmark measures only its
    # own step.
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2607 2609
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clean option if need to evaluate the impact of build volatile
    revisions set cache on the revset execution. Volatile cache hold filtered
    and obsolete related cache."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            # Drop filtering/obsolescence caches so they get rebuilt.
            repo.invalidatevolatilesets()
        # Iterate fully to force evaluation of the (lazy) revset.
        if contexts:
            for unused in repo.set(expr):
                pass
        else:
            for unused in repo.revs(expr):
                pass
    timer(d)
    fm.end()
2630 2632
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def benchfor(compute, name):
        """Build a benchmark callable: invalidate caches, then recompute."""
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            compute(repo, name)
        return d

    # Obsolescence-related sets first.
    obsnames = sorted(obsolete.cachefuncs)
    if names:
        obsnames = [n for n in obsnames if n in names]
    for name in obsnames:
        timer(benchfor(obsolete.getrevs, name), title=name)

    # Then the repoview filter sets.
    filternames = sorted(repoview.filtertable)
    if names:
        filternames = [n for n in filternames if n in names]
    for name in filternames:
        timer(benchfor(repoview.filterrevs, name), title=name)
    fm.end()
2672 2674
@command(b'perfbranchmap',
    [(b'f', b'full', False,
      b'Includes build time of subset'),
     (b'', b'clear-revbranch', False,
      b'purge the revbranch cache between computation'),
    ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            # modern Mercurial: branch caches are stored per filter name
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap so subsets are rebuilt too
                view._branchcaches.clear()
            else:
                # drop only this filter's cache; subsets stay warm
                filtered.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    # topological order: repeatedly pick a filter whose subset filter is no
    # longer pending (for/else raises if the subset graph has a cycle)
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap read/write so each timed run actually
    # recomputes the branchmap instead of loading or persisting it
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        # always restore the patched read/write, even if a run aborts
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2751 2753
@command(b'perfbranchmapupdate', [
    (b'', b'base', [], b'subset of revision to start from'),
    (b'', b'target', [], b'subset of revision to end with'),
    (b'', b'clear-caches', False, b'clear cache between each runs')
] + formatteropts)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None] # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the revisions the timed update will have to process
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    # synthetic repoview filters exposing exactly the base/target subsets
    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # temporarily register the synthetic filters (removed in `finally`)
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                # bring the reused branchmap up to the base state
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset was found; build the base branchmap anew
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # start each run from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2855 2857
@command(b'perfbranchmapload', [
    (b'f', b'filter', b'', b'Specify repoview filter'),
    (b'', b'list', False, b'List branchmap filter caches'),
    (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),

] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap

    With --list, print the existing on-disk branchmap cache files and their
    sizes instead of benchmarking. Otherwise, time reading the branchmap
    cache for the given filter (falling back to the nearest cached subset).
    """
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            # on-disk branchmap caches are named branch2[-<filtername>]
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap() # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        # walk towards larger subsets until one has an on-disk cache
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        fromfile(repo)
    timer(bench, setup=setup)
    fm.end()
2906 2908
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    storevfs = getsvfs(repo)
    def d():
        # instantiating the obsstore parses all on-disk markers
        return len(obsolete.obsstore(storevfs))
    timer(d)
    fm.end()
2916 2918
@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
    (b'', b'mincost', 0, b'smallest cost of items in cache'),
    (b'', b'maxcost', 100, b'maximum cost of items in cache'),
    (b'', b'size', 4, b'size of cache'),
    (b'', b'gets', 10000, b'number of key lookups'),
    (b'', b'sets', 10000, b'number of key sets'),
    (b'', b'mixed', 10000, b'number of mixed mode operations'),
    (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    opts = _byteskwargs(opts)

    costrange = list(range(mincost, maxcost + 1))

    # keys used to fill the cache; also the population for the get workload
    values = [random.randint(0, _maxint) for _ in _xrange(size)]

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = [random.choice(values) for _ in _xrange(gets)]

    # Set mode tests insertion speed with cache eviction. Key and cost are
    # drawn in the same loop to keep the RNG consumption order stable.
    setseq = []
    costs = []
    for _ in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for _ in _xrange(mixed):
        roll = random.randint(0, 100)
        op = 0 if roll < mixedgetfreq else 1
        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def doinit():
        for _ in _xrange(10000):
            util.lrucachedict(size)

    def dogets():
        cache = util.lrucachedict(size)
        for v in values:
            cache[v] = v
        for key in getseq:
            value = cache[key]
            value # silence pyflakes warning

    def dogetscost():
        cache = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            cache.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = cache[key]
                value # silence pyflakes warning
            except KeyError:
                pass

    def doinserts():
        cache = util.lrucachedict(size)
        for v in setseq:
            cache.insert(v, v)

    def doinsertscost():
        cache = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            cache.insert(v, v, cost=costs[i])

    def dosets():
        cache = util.lrucachedict(size)
        for v in setseq:
            cache[v] = v

    def domixed():
        cache = util.lrucachedict(size)
        for op, v, cost in mixedops:
            if op == 0:
                try:
                    cache[v]
                except KeyError:
                    pass
            else:
                cache[v] = v

    def domixedcost():
        cache = util.lrucachedict(size, maxcost=costlimit)
        for op, v, cost in mixedops:
            if op == 0:
                try:
                    cache[v]
                except KeyError:
                    pass
            else:
                cache.insert(v, v, cost=cost)

    # cost-aware and plain variants are mutually exclusive benchmark suites
    if costlimit:
        benches = [
            (doinit, b'init'),
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ]
    else:
        benches = [
            (doinit, b'init'),
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed'),
        ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3047 3049
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    line = b'Testing write performance\n'
    def write():
        # many iterations so per-call timer overhead is amortized away
        for _ in range(100000):
            ui.write(line)
    timer(write)
    fm.end()
3060 3062
def uisetup(ui):
    """extension setup hook

    On Mercurial versions where cmdutil.openrevlog() exists but
    commands.debugrevlogopts does not, wrap openrevlog() so that the
    unsupported '--dir' option aborts with a clear hint instead of
    failing obscurely.
    """
    if (util.safehasattr(cmdutil, b'openrevlog') and
        not util.safehasattr(commands, b'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(b"This version doesn't support --dir option",
                                  hint=b"use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3075 3077
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        with ui.makeprogress(topic, total=total) as progress:
            # use _xrange instead of pycompat.xrange so this extension
            # stays loadable on Mercurial versions without pycompat
            for i in _xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now