##// END OF EJS Templates
perf: add a --stats argument to perfhelper-pathcopies...
marmoute -
r43212:adac17fa default
parent child Browse files
Show More
@@ -1,3228 +1,3273 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
12 12    worst, median, average. If not set only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
16 16    number of seconds to wait before any group of runs (default: 1)
17 17
18 18 ``pre-run``
19 19    number of runs to perform before starting measurement.
20 20
21 21 ``profile-benchmark``
22 22 Enable profiling for the benchmarked section.
23 23 (The first iteration is benchmarked)
24 24
25 25 ``run-limits``
26 26 Control the number of runs each benchmark will perform. The option value
27 27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 28 conditions are considered in order with the following logic:
29 29
30 30 If benchmark has been running for <time> seconds, and we have performed
31 31 <numberofrun> iterations, stop the benchmark,
32 32
33 33 The default value is: `3.0-100, 10.0-3`
34 34
35 35 ``stub``
36 36 When set, benchmarks will only be run once, useful for testing
37 37 (default: off)
38 38 '''
39 39
40 40 # "historical portability" policy of perf.py:
41 41 #
42 42 # We have to do:
43 43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 46 # - make historical perf command work correctly with as wide Mercurial
47 47 # version as possible
48 48 #
49 49 # We have to do, if possible with reasonable cost:
50 50 # - make recent perf command for historical feature work correctly
51 51 # with early Mercurial
52 52 #
53 53 # We don't have to do:
54 54 # - make perf command for recent feature work correctly with early
55 55 # Mercurial
56 56
57 57 from __future__ import absolute_import
58 58 import contextlib
59 59 import functools
60 60 import gc
61 61 import os
62 62 import random
63 63 import shutil
64 64 import struct
65 65 import sys
66 66 import tempfile
67 67 import threading
68 68 import time
69 69 from mercurial import (
70 70 changegroup,
71 71 cmdutil,
72 72 commands,
73 73 copies,
74 74 error,
75 75 extensions,
76 76 hg,
77 77 mdiff,
78 78 merge,
79 79 revlog,
80 80 util,
81 81 )
82 82
83 83 # for "historical portability":
84 84 # try to import modules separately (in dict order), and ignore
85 85 # failure, because these aren't available with early Mercurial
86 86 try:
87 87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 88 except ImportError:
89 89 pass
90 90 try:
91 91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 92 except ImportError:
93 93 pass
94 94 try:
95 95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 96 dir(registrar) # forcibly load it
97 97 except ImportError:
98 98 registrar = None
99 99 try:
100 100 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 101 except ImportError:
102 102 pass
103 103 try:
104 104 from mercurial.utils import repoviewutil # since 5.0
105 105 except ImportError:
106 106 repoviewutil = None
107 107 try:
108 108 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 109 except ImportError:
110 110 pass
111 111 try:
112 112 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 113 except ImportError:
114 114 pass
115 115
116 116 try:
117 117 from mercurial import profiling
118 118 except ImportError:
119 119 profiling = None
120 120
def identity(a):
    """Return *a* unchanged; used below as a no-op fallback when a
    pycompat helper (byteskwargs, fsencode, ...) is unavailable."""
    return a
123 123
# for "historical portability":
# pick up pycompat helpers when available, otherwise fall back to
# py2-only equivalents (identity/str/xrange) defined right here
try:
    from mercurial import pycompat
    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
    _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
    _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (NameError, ImportError, AttributeError):
    # any of the helpers above may be missing on old versions; fall back
    # to py2-only spellings for the whole group
    import inspect
    getargspec = inspect.getargspec
    _byteskwargs = identity
    _bytestr = str
    fsencode = identity # no py3 support
    _maxint = sys.maxint # no py3 support
    _sysstr = lambda x: x # no py3 support
    _xrange = xrange

# the Queue class moved around between pycompat versions; try the
# newest location first, then progressively older ones
try:
    # 4.7+
    queue = pycompat.queue.Queue
except (NameError, AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (NameError, AttributeError, ImportError):
        import Queue as queue
155 155
# maketemplater moved between modules across versions; try the newest
# location (logcmdutil) first, fall back to cmdutil, else disable
try:
    from mercurial import logcmdutil
    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None

# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()
def safehasattr(thing, attr):
    # sentinel-based hasattr that works with bytes attribute names
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    # NOTE(review): on py3 os.name is a str, so this bytes comparison can
    # never match — harmless, since py3 always takes the perf_counter
    # branch above; this branch only matters on py2 where b'nt' == 'nt'
    util.timer = time.clock
else:
    util.timer = time.time
182 182
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
    (b'c', b'changelog', False, (b'open changelog')),
    (b'm', b'manifest', False, (b'open manifest')),
    (b'', b'dir', False, (b'open directory manifest')),
    ]))

# command table populated by the @command decorator defined below
cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    # a command spec is b"name|alias1|alias2..."; index 0 is the primary name
    return cmd.split(b"|")
210 210
# pick (or synthesize) a @command decorator appropriate for the running
# Mercurial: registrar.command (newest), cmdutil.command (possibly
# wrapped to add 'norepo'), or a minimal local implementation
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
238 238
# register the perf.* config items when the registrar API exists; ignore
# failures on versions without it.  The TypeError branch re-registers
# without the 'experimental' keyword for hg >= 5.2 (see a11fd395e83f).
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
               experimental=True,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
               experimental=True,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
               experimental=True,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
               experimental=True,
    )
    configitem(b'perf', b'pre-run',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'profile-benchmark',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'run-limits',
               default=mercurial.configitems.dynamicdefault,
               experimental=True,
    )
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'pre-run',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'profile-benchmark',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'run-limits',
               default=mercurial.configitems.dynamicdefault,
    )
296 296
def getlen(ui):
    """Return the length function honoring the perf.stub config.

    Under perf.stub every collection is reported as holding a single
    element so stub runs stay cheap."""
    if not ui.configbool(b"perf", b"stub", False):
        return len
    return lambda x: 1
301 301
class noop(object):
    """Context manager that does nothing on entry or exit."""

    def __enter__(self):
        return None

    def __exit__(self, *args):
        return None

# shared do-nothing context, used where a profiler would otherwise go
NOOPCTX = noop()
310 310
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                # falsy so `if fm:` style checks treat it as a plain formatter
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry is "<seconds>-<minimum-run-count>"; malformed entries are
    # warned about and skipped, and an empty result falls back to defaults
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n'
                     % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (_bytestr(e), item)))
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (_bytestr(e), item)))
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # experimental config: perf.profile-benchmark (first iteration only,
    # see _timer which swaps the profiler for NOOPCTX after one run)
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    # experimental config: perf.pre-run — warmup runs before measurement
    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
                          prerun=prerun, profiler=profiler)
    return t, fm
410 410
def stub_timer(fm, func, setup=None, title=None):
    """One-shot stand-in for _timer used when perf.stub is set.

    Runs the optional *setup* then *func* exactly once; *fm* and *title*
    are accepted only for signature compatibility and no timing output
    is produced."""
    if setup is not None:
        setup()
    func()
415 415
@contextlib.contextmanager
def timeone():
    """Time the enclosed block once.

    Yields a list which, after the block exits, holds a single
    (wall-clock, user-cpu, system-cpu) delta tuple."""
    measurements = []
    os_before = os.times()
    wall_before = util.timer()
    yield measurements
    wall_after = util.timer()
    os_after = os.times()
    measurements.append((wall_after - wall_before,
                         os_after[0] - os_before[0],
                         os_after[1] - os_before[1]))
426 426
427 427
# list of stop condition (elapsed time, minimal run count)
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)

def _timer(fm, func, setup=None, title=None, displayall=False,
           limits=DEFAULTLIMITS, prerun=0, profiler=None):
    """Repeatedly run *func* (after *setup*) until a limit is hit, then
    report the collected timings through formatone().

    Only the first measured iteration runs under *profiler*; afterwards
    the profiler is replaced by the no-op context manager.
    """
    # collect garbage up front so a pending collection doesn't pollute timings
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # warmup iterations, not measured
    for i in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # profile only the first iteration
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    # r is the return value of the last run; shown as "! result: ..."
    formatone(fm, results, title=title, result=r,
              displayall=displayall)
466 466
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark result through formatter *fm*.

    *timings* is a list of (wall, user, sys) tuples; it is sorted in
    place.  Always shows the best run; with *displayall* also shows max,
    average and median.  A falsy *result* is not reported.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)
    def display(role, entry):
        # fields other than 'best' get a "<role>." prefix in the template keys
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count',  b' (%s of %%d)' % role, count)
        fm.plain(b'\n')
    # sorting by wall time puts the fastest run first
    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
498 498
# utilities for historical portability

def getint(ui, section, name, default):
    """Read config value *section*.*name* as an integer.

    ui.configint has only been available since 1.9 (or fa2b596db182),
    so fetch the raw value and convert it here.  Returns *default* when
    unset; raises ConfigError for a non-integer value.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, raw))
512 512
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    The returned helper exposes set()/restore() so a caller can (1)
    assign a new value and (2) put the original value back afterwards.

    Aborts when 'obj' lacks 'name', so that future removal of an
    attribute a benchmark relies on is noticed instead of silently
    skewing measurements.  With ignoremissing=True this returns None
    instead of aborting, which is useful for attributes that only exist
    in some Mercurial versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    saved = getattr(obj, _sysstr(name))

    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), saved)

    return attrutil()
542 542
# utilities to examine each internal API changes

def getbranchmapsubsettable():
    """Return the branchmap 'subsettable' mapping wherever this hg keeps it."""
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
561 561
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf); fall
    # back to the older 'sopener' attribute otherwise
    storevfs = getattr(repo, 'svfs', None)
    if not storevfs:
        return getattr(repo, 'sopener')
    return storevfs
572 572
def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf); fall
    # back to the older 'opener' attribute otherwise
    repovfs = getattr(repo, 'vfs', None)
    if not repovfs:
        return getattr(repo, 'opener')
    return repovfs
583 583
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API

    Probes three generations of the tags cache API, newest first.
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
612 612
# utilities to clear cache

def clearfilecache(obj, attrname):
    """Drop *attrname* from obj's filecache (on the unfiltered view when
    the object has one)."""
    getunfiltered = getattr(obj, 'unfiltered', None)
    if getunfiltered is not None:
        obj = getunfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
622 622
def clearchangelog(repo):
    """Invalidate any cached changelog on *repo*, filtered and unfiltered."""
    unfi = repo.unfiltered()
    if repo is not unfi:
        # the filtered view keeps its own cached changelog + key
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(unfi, 'changelog')
628 628
# perf commands

@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    # benchmark a full dirstate walk of the working directory
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})
    def d():
        walked = repo.dirstate.walk(matcher, subrepos=[], unknown=True,
                                    ignored=False)
        return len(list(walked))
    timer(d)
    fm.end()
639 639
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    # benchmark annotating file *f* at the working directory parent
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]
    def d():
        return len(fctx.annotate(True))
    timer(d)
    fm.end()
647 647
@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    # benchmark repo.status(); a dirstate-level variant is kept below for
    # reference:
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                 False))))
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        st = repo.status(unknown=opts[b'unknown'])
        return sum(len(component) for component in st)
    timer(d)
    fm.end()
659 659
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    # benchmark a dry-run addremove over the working directory
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # read the original quiet level *before* entering the try block so the
    # finally clause can never hit a NameError on 'oldquiet' if anything
    # below raises
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            # the uipathfn argument was added to scmutil.addremove later on
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
677 677
def clearcaches(cl):
    """Drop changelog lookup caches, coping with internal API changes."""
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # revlogs predating clearcaches(): reset the node cache by hand
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
686 686
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def setup():
        # start every run from a cold changelog cache
        clearcaches(cl)
    def d():
        len(cl.headrevs())
    timer(d, setup=setup)
    fm.end()
699 699
@command(b'perftags', formatteropts+
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perftags(ui, repo, **opts):
    # benchmark computing repo.tags(), optionally from cold revlogs
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        cleartagscache()
    def t():
        return len(repo.tags())
    timer(t, setup=setup)
    fm.end()
718 718
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    # benchmark walking every ancestor reachable from the current heads
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        for ancestor in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()
729 729
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    # benchmark membership probes of REVSET against a lazy ancestor set
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        lazyset = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in lazyset
    timer(d)
    fm.end()
742 742
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # convert kwargs keys to bytes like every other perf command does;
    # without this, py3 passes str-keyed opts to gettimer/hg.peer while
    # the rest of the extension expects bytes keys
    opts = _byteskwargs(opts)
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # a fresh peer per run, so discovery never reuses a warm session
        repos[1] = hg.peer(ui, opts, path)
    def d():
        setdiscovery.findcommonheads(ui, *repos)
    timer(d, setup=s)
    fm.end()
757 757
@command(b'perfbookmarks', formatteropts +
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')
    def d():
        # property access triggers the (re)parse being measured
        repo._bookmarks
    timer(d, setup=setup)
    fm.end()
776 776
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # NOTE(review): open() is called with a bytes mode (b'rb') throughout;
    # py3's builtin open() expects a str mode — confirm this path is
    # exercised under py3

    # each make* helper builds a zero-argument benchmark function that
    # re-opens and re-reads the bundle from scratch on every call
    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # raw file reads, bypassing bundle parsing entirely
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # open once just to detect the bundle format and pick matching benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
894 894
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    # resolve the requested revset to changelog nodes up front, outside
    # the timed section
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # drain the chunk generator; generation is the work being measured
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
925 925
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark the computation of the dirstate directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate outside of the timed section
    b'a' in dirstate
    def d():
        dirstate.hasdir(b'a')
        # drop the cached property so each run recomputes the directory map
        del dirstate._map._dirs
    timer(d)
    fm.end()
937 937
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark a dirstate reload after invalidation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate once before timing
    b"a" in repo.dirstate
    def d():
        # invalidate so the membership test below forces a fresh load
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(d)
    fm.end()
948 948
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark `hasdir` on a freshly recomputed directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate outside of the timed section
    b"a" in repo.dirstate
    def d():
        repo.dirstate.hasdir(b"a")
        # drop the cached property so each run recomputes the directory map
        del repo.dirstate._map._dirs
    timer(d)
    fm.end()
959 959
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark the construction of the dirstate file fold map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate outside of the timed section
    b'a' in dirstate
    def d():
        dirstate._map.filefoldmap.get(b'a')
        # drop the cached property so each run rebuilds the fold map
        del dirstate._map.filefoldmap
    timer(d)
    fm.end()
971 971
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark the construction of the dirstate directory fold map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate outside of the timed section
    b'a' in dirstate
    def d():
        dirstate._map.dirfoldmap.get(b'a')
        # drop both cached properties: the fold map and the underlying
        # directory map it is derived from
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(d)
    fm.end()
984 984
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark writing the dirstate out to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # prime the dirstate outside of the timed section
    b"a" in ds
    def d():
        # force the dirty flag so write() actually writes each run
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()
996 996
def _getmergerevs(repo, opts):
    """parse command arguments to return the revisions involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        # default the merge base to the ancestor of the two sides
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1018 1018
@command(b'perfmergecalculate',
         [
          (b'r', b'rev', b'.', b'rev to merge against'),
          (b'', b'from', b'', b'rev to merge from'),
          (b'', b'base', b'', b'the revision to use as base'),
         ] + formatteropts)
def perfmergecalculate(ui, repo, **opts):
    """benchmark `merge.calculateupdates` between two revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()
1037 1037
@command(b'perfmergecopies',
         [
          (b'r', b'rev', b'.', b'rev to merge against'),
          (b'', b'from', b'', b'rev to merge from'),
          (b'', b'base', b'', b'the revision to use as base'),
         ] + formatteropts)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)
    timer(d)
    fm.end()
1055 1055
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def d():
        # time copy detection between the two provided revisions
        copies.pathcopies(ctx1, ctx2)
    timer(d)
    fm.end()
1067 1067
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
          ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def d():
        phases = _phases
        if full:
            # with --full, also drop the filecache entry so the on-disk
            # phase data is re-read, not just recomputed
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()
1086 1086
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    # local imports keep the module importable on Mercurial versions where
    # these helpers may differ
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    # fetch the remote phase roots, then drop the peer: only the data is
    # needed for the timed section
    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    # count how many remote roots are known locally and non-public
    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
1142 1142
@command(b'perfmanifest',[
    (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
    (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset; derive its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hex node given directly
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # getstorage() only exists on newer Mercurial versions
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        # clear caches so every run really reads from storage
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
1178 1178
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading a single changeset from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()
    def d():
        repo.changelog.read(n)
        #repo.changelog._cache = None
    timer(d)
    fm.end()
1189 1189
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # drop the cached ignore matcher so each run rebuilds it
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        # property access triggers (re)computation of the ignore matcher
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1206 1206
@command(b'perfindex', [
    (b'', b'rev', [], b'revision to be looked up (default tip)'),
    (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # after _byteskwargs() every key is bytes: use b'rev', not 'rev'
        # (a str key raises KeyError on Python 3); abort message is bytes
        # for consistency with the rest of the file
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)
    timer(d, setup=setup)
    fm.end()
1260 1260
@command(b'perfnodemap', [
    (b'', b'rev', [], b'revision to be looked up (default tip)'),
    (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ] + formatteropts)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # after _byteskwargs() every key is bytes: use b'clear_caches' (a str
    # key raises KeyError on Python 3)
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        # abort message is bytes for consistency with the rest of the file
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]
    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        def setup():
            setnodeget()
    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1319 1319
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup time of a bare `hg version` invocation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        if os.name != r'nt':
            # clear HGRCPATH so config loading does not skew the measurement
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
        else:
            # Windows: no inline env assignment, and NUL is the null device
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
    timer(d)
    fm.end()
1333 1333
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    # pre-resolve the nodes so only parents() is timed
    nl = [repo.changelog.node(i) for i in _xrange(count)]
    def d():
        for n in nl:
            repo.changelog.parents(n)
    timer(d)
    fm.end()
1357 1357
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark reading the file list of a single changeset"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo[rev].files()))
    fm.end()
1367 1367
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw files entry of a changeset from the changelog"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    changelog = repo.changelog
    # index 3 of the changelog entry is the files list
    timer(lambda: len(changelog.read(rev)[3]))
    fm.end()
1378 1378
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision identifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def lookup():
        return len(repo.lookup(rev))
    timer(lookup)
    fm.end()
1385 1385
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
          ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark applying a stream of random edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed so every run replays the identical edit sequence
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    # pre-generate the edits so only replacelines() is timed
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1419 1419
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revision range expressions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def resolve():
        return len(scmutil.revrange(repo, specs))
    timer(resolve)
    fm.end()
1427 1427
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark a node-to-rev lookup on a freshly opened changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    # open the changelog revlog directly, bypassing the repo caches
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(n)
        # drop revlog caches so each run starts cold
        clearcaches(cl)
    timer(d)
    fm.end()
1441 1441
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
          ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark a full `hg log` run (output buffered and discarded)"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev=[]
    timer, fm = gettimer(ui, opts)
    # buffer output so terminal printing does not skew the timing
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
                               copies=opts.get(b'rename')))
    ui.popbuffer()
    fm.end()
1455 1455
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            ctx.branch() # read changelog data (in addition to the index)
    timer(moonwalk)
    fm.end()
1470 1470
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
          ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render into a null ui so terminal output does not skew the timing
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1504 1504
1505 1505 def _displaystats(ui, opts, entries, data):
1506 1506 pass
1507 1507 # use a second formatter because the data are quite different, not sure
1508 1508 # how it flies with the templater.
1509 1509 fm = ui.formatter(b'perf-stats', opts)
1510 1510 for key, title in entries:
1511 1511 values = data[key]
1512 1512 nbvalues = len(data)
1513 1513 values.sort()
1514 1514 stats = {
1515 1515 'key': key,
1516 1516 'title': title,
1517 1517 'nbitems': len(values),
1518 1518 'min': values[0][0],
1519 1519 '10%': values[(nbvalues * 10) // 100][0],
1520 1520 '25%': values[(nbvalues * 25) // 100][0],
1521 1521 '50%': values[(nbvalues * 50) // 100][0],
1522 1522 '75%': values[(nbvalues * 75) // 100][0],
1523 1523 '80%': values[(nbvalues * 80) // 100][0],
1524 1524 '85%': values[(nbvalues * 85) // 100][0],
1525 1525 '90%': values[(nbvalues * 90) // 100][0],
1526 1526 '95%': values[(nbvalues * 95) // 100][0],
1527 1527 '99%': values[(nbvalues * 99) // 100][0],
1528 1528 'max': values[-1][0],
1529 1529 }
1530 1530 fm.startitem()
1531 1531 fm.data(**stats)
1532 1532 # make node pretty for the human output
1533 1533 fm.plain('### %s (%d items)\n' % (title, len(values)))
1534 1534 lines = [
1535 1535 'min',
1536 1536 '10%',
1537 1537 '25%',
1538 1538 '50%',
1539 1539 '75%',
1540 1540 '80%',
1541 1541 '85%',
1542 1542 '90%',
1543 1543 '95%',
1544 1544 '99%',
1545 1545 'max',
1546 1546 ]
1547 1547 for l in lines:
1548 1548 fm.plain('%s: %s\n' % (l, stats[l]))
1549 1549 fm.end()
1550 1550
@command(b'perfhelper-mergecopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
          (b'', b'stats', False, b'provides statistic about the measured data'),
         ])
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # timing and rename columns only exist when --timing was requested
        output_template = [i for i in output_template
                           if not ('time' in i[0] or 'renames' in i[0])]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge commits are relevant triplet sources
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append((
                        data['p1.nbrevs'],
                        b.hex(),
                        p1.hex()
                    ))
                    alldata['nbmissingfiles'].append((
                        data['p1.nbmissingfiles'],
                        b.hex(),
                        p1.hex()
                    ))
                if p2missing:
                    alldata['nbrevs'].append((
                        data['p2.nbrevs'],
                        b.hex(),
                        p2.hex()
                    ))
                    alldata['nbmissingfiles'].append((
                        data['p2.nbmissingfiles'],
                        b.hex(),
                        p2.hex()
                    ))
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                # take the end timestamp BEFORE computing the delta; the
                # original code read the stale p1 `end` value here, so
                # p2.time was garbage
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append((
                            data['p1.renamedfiles'],
                            b.hex(),
                            p1.hex()
                        ))
                        alldata['parenttime'].append((
                            data['p1.time'],
                            b.hex(),
                            p1.hex()
                        ))
                    if p2missing:
                        alldata['parentnbrenames'].append((
                            data['p2.renamedfiles'],
                            b.hex(),
                            p2.hex()
                        ))
                        alldata['parenttime'].append((
                            data['p2.time'],
                            b.hex(),
                            p2.hex()
                        ))
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append((
                            data['nbrenamedfiles'],
                            b.hex(),
                            p1.hex(),
                            p2.hex()
                        ))
                        alldata['totaltime'].append((
                            data['time'],
                            b.hex(),
                            p1.hex(),
                            p2.hex()
                        ))
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('parentnbrenames',
                            'rename from one parent to base'))
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
1742 1742
1743 1743
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
          (b'', b'stats', False, b'provides statistic about the measured data'),
         ])
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merge commits produce interesting base/parent pairs
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    # `alldata` only exists with --stats; the unguarded
                    # appends raised NameError without that flag
                    alldata['nbrevs'].append((
                        data['nbrevs'],
                        base.hex(),
                        parent.hex(),
                    ))
                    alldata['nbmissingfiles'].append((
                        data['nbmissingfiles'],
                        base.hex(),
                        parent.hex(),
                    ))
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append((
                            data['time'],
                            base.hex(),
                            parent.hex(),
                        ))
                        alldata['nbrenames'].append((
                            data['nbrenamedfiles'],
                            base.hex(),
                            parent.hex(),
                        ))
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        # _displaystats builds its own formatter, so no new one is needed here
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
1819 1864
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """Benchmark constructing a case collision auditor over the dirstate."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)
    timer(d)
    fm.end()
1826 1871
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """Benchmark loading the fncache from disk."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    timer(lambda: store.fncache._load())
    fm.end()
1836 1881
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """Benchmark writing the fncache file.

    The cache is flagged dirty before every run so that ``write`` actually
    rewrites the file.  The write happens inside a transaction carrying a
    backup of the fncache, so the store is left untouched afterwards.

    Fix over the previous version: the repository lock and the transaction
    are now released even if the benchmark raises, instead of being leaked.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        try:
            tr.addbackup(b'fncache')
            def d():
                # mark dirty so write() does real work on every run
                s.fncache._dirty = True
                s.fncache.write(tr)
            timer(d)
            tr.close()
        finally:
            # no-op after a successful close(); aborts the transaction
            # (restoring the backup) if anything above raised
            tr.release()
    finally:
        lock.release()
    fm.end()
1853 1898
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """Benchmark encoding every path currently stored in the fncache."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    # load once up front so only the encoding itself is measured
    store.fncache._load()
    def d():
        for path in store.fncache.entries:
            store.encode(path)
    timer(d)
    fm.end()
1865 1910
def _bdiffworker(q, blocks, xdiff, ready, done):
    # Worker thread body for `perfbdiff --threads`: consume text pairs from
    # `q` and diff them until `done` is set.  A None item marks the end of a
    # batch; after draining a batch the worker parks on the `ready` condition
    # until the main thread wakes it for the next timed run.
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()
1881 1926
def _manifestrevision(repo, mnode):
    """Return the raw stored text of manifest node *mnode*."""
    ml = repo.manifestlog
    # modern manifestlogs expose getstorage(); older ones only have _revlog
    if util.safehasattr(ml, b'getstorage'):
        return ml.getstorage(b'').revision(mnode)
    return ml._revlog.revision(mnode)
1891 1936
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    # list of (old text, new text) pairs fed to the diff routine
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        # pre-start the worker threads; they park on `ready` between runs
        # (see _bdiffworker).  The initial None items let each worker reach
        # its parking spot before the first timed run.
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # signal termination and wake every worker so the threads exit
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1992 2037
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    # list of (old text, new text) pairs fed to mdiff.unidiff
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2058 2103
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes

    The diff is timed once per whitespace-option combination: none, -w,
    -b, -B and -wB.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # map of flag letter -> commands.diff() keyword argument
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    for flags in ('', 'w', 'b', 'B', 'wB'):
        # use a dedicated name instead of rebinding `opts`, which still
        # holds the perf command's own options (used by gettimer above)
        diffopts = dict((options[c], b'1') for c in flags)
        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **diffopts)
            ui.popbuffer()
        bflags = flags.encode('ascii')
        title = b'diffopts: %s' % (bflags and (b'-' + bflags) or b'none')
        timer(d, title=title)
    fm.end()
2080 2125
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # decode the header: low 16 bits are the format version, bit 16 the
    # inline flag; only v1 revlogs are supported here
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    # sample nodes at several depths of the revlog for the lookup benches
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        # time instantiating a revlog object from the opener
        revlog.revlog(opener, indexfile)

    def read():
        # time raw index I/O only
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2198 2243
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        first = startrev
        last = rllen
        step = opts[b'dist']

        if reverse:
            first, last = last - 1, first - 1
            step = -step

        for x in _xrange(first, last, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(x))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2240 2285
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
         ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative boundaries count back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # fix: the previous message misspelled "invalid"
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # fix: the 50th percentile used to be computed with a 70 factor
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
2358 2403
2359 2404 class _faketr(object):
2360 2405 def add(s, x, y, z=None):
2361 2406 return None
2362 2407
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    # Re-add revisions [startrev, stoprev] of `orig` into a temporary copy of
    # the revlog (see _temprevlog), timing each addrawrevision() call.
    # Returns a list of (rev, timing) pairs.
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            def updateprogress(pos):
                progress.update(pos)
            def completeprogress():
                progress.complete()
        else:
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # make sure caches don't help one run over another
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
2399 2444
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) for ``addrawrevision`` reproducing *rev*.

    Depending on *source*, the content is supplied either as a full text or
    as a cached delta against one of the parents (or the stored delta base).
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second one
        parent = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(parent), orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        p1diff = orig.revdiff(p1, rev)
        parent, diff = p1, p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent, diff = p2, p2diff
        cachedelta = (orig.rev(parent), diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2438 2483
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a throwaway copy of revlog *orig* truncated before *truncaterev*.

    The copy lives in a temporary directory that is removed on exit, so
    revisions can be re-added to it without touching the real store.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as fp:
            fp.seek(0)
            fp.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as fp:
            fp.seek(0)
            fp.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname, **revlogkwargs)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
2489 2534
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # default to every available engine that can actually compress
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # return a raw file handle on the file holding the chunk data
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2607 2652
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # with -c/-m the positional argument is the revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # extract the compressed chunk of every rev in `chain` out of the
        # pre-read segments in `data` (phase 3, without any I/O)
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    # each do*() helper below benchmarks one phase; unless --cache is
    # given, caches are cleared first so every run starts cold
    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved between modules over time ("historical portability")
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute the inputs each phase benchmark needs (these bindings are
    # what the closures above reference: slicedchain, data, rawchunks, ...)
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    # slicing is only a meaningful phase with sparse-read enabled
    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2743 2788
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option to evaluate the impact of building the volatile
    revision set caches on revset execution. The volatile caches hold the
    filtering and obsolescence related data."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def runrevset():
        # optionally start from cold volatile caches on every run
        if clear:
            repo.invalidatevolatilesets()
        # pick the iteration flavor: changectx objects are notably more
        # expensive to build than bare revision numbers
        revisions = repo.set(expr) if contexts else repo.revs(expr)
        for unused in revisions:
            pass

    timer(runrevset)
    fm.end()
2766 2811
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def mktimer(func, name):
        # build a benchmark closure computing func(repo, name) from cold
        # volatile-set caches (and, on request, a dropped obsstore)
        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            func(repo, name)
        return run

    def selected(candidates):
        # sorted candidates, restricted to the names given on the command
        # line when any were provided
        chosen = sorted(candidates)
        if names:
            chosen = [n for n in chosen if n in names]
        return chosen

    # obsolescence related sets first, then the filtered-revs sets
    for name in selected(obsolete.cachefuncs):
        timer(mktimer(obsolete.getrevs, name), title=name)

    for name in selected(repoview.filtertable):
        timer(mktimer(repoview.filterrevs, name), title=name)
    fm.end()
2808 2853
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
          ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap so the subset is rebuilt too
                view._branchcaches.clear()
            else:
                # drop only this filter's entry; its subset stays warm
                filtered.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    # topological-ish ordering: repeatedly pick a filter whose subset is not
    # still pending; the for/else fires only on a dependency cycle
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap read/write so only the in-memory update is
    # measured; restored in the finally block below
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2887 2932
@command(b'perfbranchmapupdate', [
     (b'', b'base', [], b'subset of revision to start from'),
     (b'', b'target', [], b'subset of revision to end with'),
     (b'', b'clear-caches', False, b'clear cache between each runs')
] + formatteropts)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None] # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the update being benchmarked goes from allbaserevs to newrevs
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    # temporary repoview filters exposing exactly the base/target subsets
    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # register the temporary filters; removed again in the finally block
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset were found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # start each run from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2991 3036
@command(b'perfbranchmapload', [
     (b'f', b'filter', b'', b'Specify repoview filter'),
     # fixed typo in user-facing help text: "brachmap" -> "branchmap"
     (b'', b'list', False, b'List branchmap filter caches'),
     (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),

] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # --list: report the on-disk branchmap cache files and exit
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap() # make sure we have a relevant, up to date branchmap

    # branchmap.read was turned into branchcache.fromfile at some point
    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached; fall back to
    # the nearest cached subset when it is not
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        fromfile(repo)
    timer(bench, setup=setup)
    fm.end()
3042 3087
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    storevfs = getsvfs(repo)

    def countmarkers():
        # instantiating obsstore parses every on-disk marker
        return len(obsolete.obsstore(storevfs))

    timer(countmarkers)
    fm.end()
3052 3097
@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
     (b'', b'mincost', 0, b'smallest cost of items in cache'),
     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
     (b'', b'size', 4, b'size of cache'),
     (b'', b'gets', 10000, b'number of key lookups'),
     (b'', b'sets', 10000, b'number of key sets'),
     (b'', b'mixed', 10000, b'number of mixed mode operations'),
     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    """benchmark `util.lrucachedict` operations

    Measures init, gets, inserts/sets and a mixed get/set workload; with
    --costlimit the cost-aware insert/eviction paths are exercised instead.
    """
    opts = _byteskwargs(opts)

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        # NOTE: `costs` is defined further down; it is only looked up when
        # this closure runs, after the benchmark inputs are built
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        # (op, key, cost) triples; key range is 2x cache size so roughly
        # half the lookups miss
        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # with a cost limit only the cost-aware variants are meaningful
    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3183 3228
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def write():
        # hammer ui.write with a fixed line to measure raw output overhead
        iterations = 100000
        for unused in range(iterations):
            ui.write((b'Testing write performance\n'))

    timer(write)
    fm.end()
3196 3241
def uisetup(ui):
    # Extension setup hook: patch cmdutil.openrevlog on old Mercurial
    # versions so an unsupported --dir flag aborts with a clear message.
    if (util.safehasattr(cmdutil, b'openrevlog') and
        not util.safehasattr(commands, b'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(b"This version doesn't support --dir option",
                                  hint=b"use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3211 3256
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        # walk a progress bar from 0 to --total one increment at a time
        with ui.makeprogress(topic, total=total) as progress:
            for unused in _xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now