##// END OF EJS Templates
perf: add a --stats argument to perfhelper-mergecopies...
marmoute -
r43211:3a1ad3ae default
parent child Browse files
Show More
@@ -1,3094 +1,3228 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
12 12 worst, median, and average. If not set, only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
16 16 number of seconds to wait before any group of runs (default: 1)
17 17
18 18 ``pre-run``
19 19 number of runs to perform before starting measurement.
20 20
21 21 ``profile-benchmark``
22 22 Enable profiling for the benchmarked section.
23 23 (The first iteration is benchmarked)
24 24
25 25 ``run-limits``
26 26 Control the number of runs each benchmark will perform. The option value
27 27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 28 conditions are considered in order with the following logic:
29 29
30 30 If benchmark has been running for <time> seconds, and we have performed
31 31 <numberofrun> iterations, stop the benchmark,
32 32
33 33 The default value is: `3.0-100, 10.0-3`
34 34
35 35 ``stub``
36 36 When set, benchmarks will only be run once, useful for testing
37 37 (default: off)
38 38 '''
39 39
40 40 # "historical portability" policy of perf.py:
41 41 #
42 42 # We have to do:
43 43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 46 # - make historical perf command work correctly with as wide Mercurial
47 47 # version as possible
48 48 #
49 49 # We have to do, if possible with reasonable cost:
50 50 # - make recent perf command for historical feature work correctly
51 51 # with early Mercurial
52 52 #
53 53 # We don't have to do:
54 54 # - make perf command for recent feature work correctly with early
55 55 # Mercurial
56 56
57 57 from __future__ import absolute_import
58 58 import contextlib
59 59 import functools
60 60 import gc
61 61 import os
62 62 import random
63 63 import shutil
64 64 import struct
65 65 import sys
66 66 import tempfile
67 67 import threading
68 68 import time
69 69 from mercurial import (
70 70 changegroup,
71 71 cmdutil,
72 72 commands,
73 73 copies,
74 74 error,
75 75 extensions,
76 76 hg,
77 77 mdiff,
78 78 merge,
79 79 revlog,
80 80 util,
81 81 )
82 82
83 83 # for "historical portability":
84 84 # try to import modules separately (in dict order), and ignore
85 85 # failure, because these aren't available with early Mercurial
86 86 try:
87 87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 88 except ImportError:
89 89 pass
90 90 try:
91 91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 92 except ImportError:
93 93 pass
94 94 try:
95 95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 96 dir(registrar) # forcibly load it
97 97 except ImportError:
98 98 registrar = None
99 99 try:
100 100 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 101 except ImportError:
102 102 pass
103 103 try:
104 104 from mercurial.utils import repoviewutil # since 5.0
105 105 except ImportError:
106 106 repoviewutil = None
107 107 try:
108 108 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 109 except ImportError:
110 110 pass
111 111 try:
112 112 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 113 except ImportError:
114 114 pass
115 115
116 116 try:
117 117 from mercurial import profiling
118 118 except ImportError:
119 119 profiling = None
120 120
def identity(a):
    # Return the argument unchanged; used below as a no-op stand-in for
    # pycompat conversion helpers on Mercurial versions that lack them.
    return a
123 123
124 124 try:
125 125 from mercurial import pycompat
126 126 getargspec = pycompat.getargspec # added to module after 4.5
127 127 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
128 128 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
129 129 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
130 130 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
131 131 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
132 132 if pycompat.ispy3:
133 133 _maxint = sys.maxsize # per py3 docs for replacing maxint
134 134 else:
135 135 _maxint = sys.maxint
136 136 except (NameError, ImportError, AttributeError):
137 137 import inspect
138 138 getargspec = inspect.getargspec
139 139 _byteskwargs = identity
140 140 _bytestr = str
141 141 fsencode = identity # no py3 support
142 142 _maxint = sys.maxint # no py3 support
143 143 _sysstr = lambda x: x # no py3 support
144 144 _xrange = xrange
145 145
146 146 try:
147 147 # 4.7+
148 148 queue = pycompat.queue.Queue
149 149 except (NameError, AttributeError, ImportError):
150 150 # <4.7.
151 151 try:
152 152 queue = pycompat.queue
153 153 except (NameError, AttributeError, ImportError):
154 154 import Queue as queue
155 155
156 156 try:
157 157 from mercurial import logcmdutil
158 158 makelogtemplater = logcmdutil.maketemplater
159 159 except (AttributeError, ImportError):
160 160 try:
161 161 makelogtemplater = cmdutil.makelogtemplater
162 162 except (AttributeError, ImportError):
163 163 makelogtemplater = None
164 164
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
# _undefined is a private sentinel so that an attribute whose value is
# None (or any falsy value) is still reported as present.
_undefined = object()
def safehasattr(thing, attr):
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    # NOTE(review): b'nt' == os.name only compares equal on Python 2
    # (where bytes is str); on Python 3 the perf_counter branch above is
    # always taken first, so this branch appears py2-only — confirm.
    util.timer = time.clock
else:
    util.timer = time.time

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
    (b'c', b'changelog', False, (b'open changelog')),
    (b'm', b'manifest', False, (b'open manifest')),
    (b'', b'dir', False, (b'open directory manifest')),
]))

# command table populated by the @command decorator defined below
cmdtable = {}
# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    # a command spec looks like b"name|alias1|alias2"; return every name
    return [alias for alias in cmd.split(b"|")]
210 210
# Pick (or synthesize) the @command decorator depending on what this
# Mercurial version provides; all three variants populate cmdtable.
if safehasattr(registrar, 'command'):
    # modern path: registrar.command, since 3.7
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
238 238
# Register the perf.* config options so devel-warnings about unregistered
# config access stay quiet on Mercurial versions that have registrar.
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
               experimental=True,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
               experimental=True,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
               experimental=True,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
               experimental=True,
    )
    configitem(b'perf', b'pre-run',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'profile-benchmark',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'run-limits',
               default=mercurial.configitems.dynamicdefault,
               experimental=True,
    )
except (ImportError, AttributeError):
    # old Mercurial without registrar/configitems: nothing to register
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    # (that version's configitem() rejects the experimental= keyword, so
    # re-register everything without it)
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'pre-run',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'profile-benchmark',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'run-limits',
               default=mercurial.configitems.dynamicdefault,
    )
296 296
def getlen(ui):
    # When the perf.stub config knob is set, benchmarks only run once, so
    # pretend every collection has a single element; otherwise use len().
    if not ui.configbool(b"perf", b"stub", False):
        return len
    return lambda x: 1
301 301
class noop(object):
    """Context manager that does nothing on entry or exit."""

    def __enter__(self):
        return None

    def __exit__(self, *args):
        return None

# shared do-nothing instance, used where a profiler context is optional
NOOPCTX = noop()
310 310
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                # always falsy, mirroring plainformatter's behavior
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # Parse "<seconds>-<runcount>" pairs into (float, int) stop conditions;
    # malformed entries are warned about and skipped.
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n'
                     % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (_bytestr(e), item)))
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (_bytestr(e), item)))
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # experimental config: perf.profile-benchmark (profiles first iteration)
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
                          prerun=prerun, profiler=profiler)
    return t, fm
410 410
def stub_timer(fm, func, setup=None, title=None):
    """Degenerate timer used when perf.stub is set: run *func* exactly once
    (after the optional *setup*) and report nothing."""
    if setup:
        setup()
    func()
415 415
@contextlib.contextmanager
def timeone():
    # Yields a list that, after the with-block exits, holds a single
    # (wall-clock, user-cpu-delta, system-cpu-delta) tuple for the block.
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    # os.times() fields: [0] = user time, [1] = system time
    r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
426 426
427 427
# list of stop condition (elapsed time, minimal run count)
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)

def _timer(fm, func, setup=None, title=None, displayall=False,
           limits=DEFAULTLIMITS, prerun=0, profiler=None):
    # Run *func* repeatedly (calling *setup* before each run) until one of
    # the (elapsed-seconds, min-run-count) *limits* is satisfied, then
    # report timings through formatter *fm* via formatone().
    # Only the first measured iteration runs under *profiler*.
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # warm-up runs; not measured
    for i in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # profile only the first iteration
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)
466 466
def formatone(fm, timings, title=None, result=None, displayall=False):
    # Emit one benchmark's timings through formatter *fm*.
    # *timings* is a list of (wall, user, system) tuples; only the best
    # entry is shown unless *displayall* is set (perf.all-timing), in which
    # case max/avg/median are also reported.

    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)
    def display(role, entry):
        # print one "! wall ... comb ... user ... sys ..." line; non-best
        # roles get a "role." prefix on each field name
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
            fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        # comb = combined cpu time (user + system)
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')
    # tuples sort by wall-clock time first
    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
498 498
499 499 # utilities for historical portability
500 500
def getint(ui, section, name, default):
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182),
    # so read the raw value and convert it ourselves.
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, raw))
512 512
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    # capture the current value so restore() can put it back
    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        # small handle closing over obj/name/origvalue
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
542 542
543 543 # utilities to examine each internal API changes
544 544
def getbranchmapsubsettable():
    # Return the branch-cache 'subsettable' mapping from whichever module
    # hosts it in this Mercurial version.
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
561 561
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf);
    # fall back to the legacy 'sopener' attribute before that.
    svfs = getattr(repo, 'svfs', None)
    return svfs if svfs else getattr(repo, 'sopener')
572 572
def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf);
    # fall back to the legacy 'opener' attribute before that.
    vfs = getattr(repo, 'vfs', None)
    return vfs if vfs else getattr(repo, 'opener')
583 583
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    # older APIs: clearing means resetting the attribute to None
    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
612 612
613 613 # utilities to clear cache
614 614
def clearfilecache(obj, attrname):
    """Drop *attrname* from obj's @filecache-backed caches.

    Works on the unfiltered repo when *obj* is a (possibly filtered)
    repository, since that is where filecache state lives."""
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
622 622
def clearchangelog(repo):
    # Force the changelog to be re-read from disk on next access.
    # On a filtered repo, also reset the filtered-changelog cache slots
    # (bypassing any property machinery via object.__setattr__).
    if repo is not repo.unfiltered():
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
628 628
629 629 # perf commands
630 630
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    # benchmark a full dirstate walk (including unknown files) over the
    # working directory, matching *pats
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()
639 639
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    # benchmark annotating file *f* at the working directory parent
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()
647 647
@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    # benchmark repo.status(); --unknown additionally scans for untracked
    # files, which is the expensive part of a status run
    opts = _byteskwargs(opts)
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
659 659
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    # benchmark a dry-run addremove over the whole working directory
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Read and replace the quiet flag *before* entering the try block:
    # previously 'oldquiet' was assigned inside it, so a failure on that
    # first line would make the finally clause raise UnboundLocalError
    # and mask the original exception.
    oldquiet = repo.ui.quiet
    repo.ui.quiet = True
    try:
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            # scmutil.addremove grew a 'uipathfn' argument in 5.0
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
677 677
def clearcaches(cl):
    # Clear a changelog/revlog's lookup caches.
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # pre-clearcaches revlogs: reset the node cache to just the null
        # revision by hand
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
686 686
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def s():
        # drop revlog caches before each run so headrevs() is recomputed
        clearcaches(cl)
    def d():
        len(cl.headrevs())
    timer(d, setup=s)
    fm.end()
699 699
@command(b'perftags', formatteropts+
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perftags(ui, repo, **opts):
    # benchmark reading/computing repo tags; --clear-revlogs additionally
    # forces changelog and manifest to be re-read on each run
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def s():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()
    def t():
        return len(repo.tags())
    timer(t, setup=s)
    fm.end()
718 718
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    # benchmark iterating all ancestors of the current heads
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        for a in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()
729 729
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    # benchmark membership tests of REVSET's revisions against the lazy
    # ancestor set of the current heads
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            # membership test is the operation being measured
            rev in s
    timer(d)
    fm.end()
742 742
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # Convert kwargs to bytes keys like every other perf command does;
    # without this, str-keyed opts flow into gettimer() and hg.peer() on
    # Python 3 while the rest of the extension expects bytes keys.
    opts = _byteskwargs(opts)
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # reconnect to the peer before each run so discovery starts cold
        repos[1] = hg.peer(ui, opts, path)
    def d():
        setdiscovery.findcommonheads(ui, *repos)
    timer(d, setup=s)
    fm.end()
757 757
@command(b'perfbookmarks', formatteropts +
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def s():
        if clearrevlogs:
            clearchangelog(repo)
        # drop the cached bookmark store so d() re-parses it from disk
        clearfilecache(repo, b'_bookmarks')
    def d():
        # attribute access triggers the (re-)parse being measured
        repo._bookmarks
    timer(d, setup=s)
    fm.end()
776 776
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # run fn() over a freshly opened/parsed bundle each time
    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    # read the whole bundle stream in chunks of *size* bytes
    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    # raw file reads, bypassing bundle parsing entirely (I/O baseline)
    def makestdioread(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    # read each bundle2 part in chunks of *size* bytes
    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # sniff the bundle format once to decide which benchmarks apply
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
894 894
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # exhaust the chunk generator; generation, not consumption, is
        # what is being measured
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
925 925
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark dirstate directory-map computation (``hasdir``)"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # warm up: force the dirstate to be loaded before timing starts
    b'a' in dirstate
    def d():
        dirstate.hasdir(b'a')
        # drop the cached directory set so the next run recomputes it
        del dirstate._map._dirs
    timer(d)
    fm.end()
937 937
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark reloading the dirstate after invalidation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # warm up: make sure the dirstate has been parsed once before timing
    b"a" in repo.dirstate
    def d():
        # drop the in-memory state so the lookup below reloads it from disk
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(d)
    fm.end()
948 948
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark rebuilding the dirstate ``_dirs`` map on each run"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # warm up the dirstate itself so only the directory map is measured
    b"a" in repo.dirstate
    def d():
        repo.dirstate.hasdir(b"a")
        # drop the cached directory set so the next run rebuilds it
        del repo.dirstate._map._dirs
    timer(d)
    fm.end()
959 959
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark building the dirstate ``filefoldmap``"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # warm up the dirstate itself so only the fold map is measured
    b'a' in dirstate
    def d():
        dirstate._map.filefoldmap.get(b'a')
        # drop the cached map so the next run rebuilds it
        del dirstate._map.filefoldmap
    timer(d)
    fm.end()
971 971
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark building the dirstate ``dirfoldmap``"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # warm up the dirstate itself so only the fold map is measured
    b'a' in dirstate
    def d():
        dirstate._map.dirfoldmap.get(b'a')
        # drop both cached maps: dirfoldmap is derived from _dirs
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(d)
    fm.end()
984 984
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark writing the dirstate out to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # warm up: make sure the dirstate is loaded before timing
    b"a" in ds
    def d():
        # force a write even though nothing actually changed
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()
996 996
def _getmergerevs(repo, opts):
    """parse command arguments to return the revisions involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        # no explicit base: use the common ancestor of the two sides
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1018 1018
@command(b'perfmergecalculate',
         [
          (b'r', b'rev', b'.', b'rev to merge against'),
          (b'', b'from', b'', b'rev to merge from'),
          (b'', b'base', b'', b'the revision to use as base'),
         ] + formatteropts)
def perfmergecalculate(ui, repo, **opts):
    """benchmark computing the merge action plan (`merge.calculateupdates`)"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()
1037 1037
@command(b'perfmergecopies',
         [
          (b'r', b'rev', b'.', b'rev to merge against'),
          (b'', b'from', b'', b'rev to merge from'),
          (b'', b'base', b'', b'the revision to use as base'),
         ] + formatteropts)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve revisions once, outside the timed section
    wctx, rctx, ancestor = _getmergerevs(repo, opts)
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)
    timer(d)
    fm.end()
1055 1055
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve both revisions once, outside the timed section
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def d():
        copies.pathcopies(ctx1, ctx2)
    timer(d)
    fm.end()
1067 1067
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
         ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def d():
        phases = _phases
        if full:
            # drop the filecache entry so re-reading the phase data from
            # disk is included in the measurement
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()
1086 1086
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    # imported here to keep module load light for older Mercurial versions
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    # fetch the remote phase roots once; only the local summary is timed
    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    # count remote non-public roots that are known locally
    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
1142 1142
@command(b'perfmanifest',[
    (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
    (b'', b'clear-disk', False, b'clear on-disk caches too'),
] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # interpret the argument as a changeset and use its manifest
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # 40 hex digits: a full manifest node
            t = bin(rev)
        else:
            # otherwise a manifest revlog revision number; getstorage is the
            # modern API, _revlog the fallback for older Mercurial
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        # clear caches so every run pays the full read cost
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
1178 1178
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading a single changelog entry"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve the revision to a node outside the timed section
    n = scmutil.revsingle(repo, rev).node()
    def d():
        repo.changelog.read(n)
        #repo.changelog._cache = None
    timer(d)
    fm.end()
1189 1189
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # drop any cached ignore matcher before each run
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        # accessing the property forces (re)building the ignore matcher
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1206 1206
@command(b'perfindex', [
    (b'', b'rev', [], b'revision to be looked up (default tip)'),
    (b'', b'no-lookup', None, b'do not revision lookup post creation'),
] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # NOTE(review): str key 'rev' and str Abort message rely on py2
        # str == bytes; on py3 these would need b'' prefixes -- confirm.
        if opts['rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)
    timer(d, setup=setup)
    fm.end()
1260 1260
@command(b'perfnodemap', [
    (b'', b'rev', [], b'revision to be looked up (default tip)'),
    (b'', b'clear-caches', True, b'clear revlog cache between calls'),
] + formatteropts)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # NOTE(review): str key 'clear_caches' relies on py2 str == bytes
    clearcaches = opts['clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort('use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]
    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        def setup():
            setnodeget()
    else:
        setnodeget()
        d() # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1319 1319
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark starting a fresh hg process (runs `version -q`)"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        if os.name != r'nt':
            # clear HGRCPATH so user config loading doesn't skew the result
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
        else:
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
    timer(d)
    fm.end()
1333 1333
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    # use the unfiltered repo so filtering cost is not part of the timing
    repo = repo.unfiltered()
    nl = [repo.changelog.node(i) for i in _xrange(count)]
    def d():
        for n in nl:
            repo.changelog.parents(n)
    timer(d)
    fm.end()
1357 1357
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark listing the files of a changeset via its context"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    def d():
        len(repo[x].files())
    timer(d)
    fm.end()
1367 1367
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the file list straight from the changelog entry"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        # field 3 of the parsed changelog entry is the list of touched files
        len(cl.read(x)[3])
    timer(d)
    fm.end()
1378 1378
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision identifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def resolve():
        return len(repo.lookup(rev))
    timer(resolve)
    fm.end()
1385 1385
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
         ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark applying a fixed pseudo-random sequence of linelog edits"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: the same edit sequence is generated on every invocation
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    # pre-compute the replacelines() arguments outside the timed section
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1419 1419
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # hoist the attribute lookup out of the timed call
    resolve = scmutil.revrange
    timer(lambda: len(resolve(repo, specs)))
    fm.end()
1427 1427
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark a cold node -> rev lookup on a standalone changelog revlog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(n)
        # drop revlog caches so each run starts cold
        clearcaches(cl)
    timer(d)
    fm.end()
1441 1441
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running `hg log`, optionally following renames"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev=[]
    timer, fm = gettimer(ui, opts)
    # swallow log output so printing does not pollute the timing
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
                               copies=opts.get(b'rename')))
    ui.popbuffer()
    fm.end()
1455 1455
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            ctx.branch() # read changelog data (in addition to the index)
    timer(moonwalk)
    fm.end()
1470 1470
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render to a throwaway ui so output cost, not terminal I/O, is measured
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1504 1504
1505 def _displaystats(ui, opts, entries, data):
1506 pass
1507 # use a second formatter because the data are quite different, not sure
1508 # how it flies with the templater.
1509 fm = ui.formatter(b'perf-stats', opts)
1510 for key, title in entries:
1511 values = data[key]
1512 nbvalues = len(data)
1513 values.sort()
1514 stats = {
1515 'key': key,
1516 'title': title,
1517 'nbitems': len(values),
1518 'min': values[0][0],
1519 '10%': values[(nbvalues * 10) // 100][0],
1520 '25%': values[(nbvalues * 25) // 100][0],
1521 '50%': values[(nbvalues * 50) // 100][0],
1522 '75%': values[(nbvalues * 75) // 100][0],
1523 '80%': values[(nbvalues * 80) // 100][0],
1524 '85%': values[(nbvalues * 85) // 100][0],
1525 '90%': values[(nbvalues * 90) // 100][0],
1526 '95%': values[(nbvalues * 95) // 100][0],
1527 '99%': values[(nbvalues * 99) // 100][0],
1528 'max': values[-1][0],
1529 }
1530 fm.startitem()
1531 fm.data(**stats)
1532 # make node pretty for the human output
1533 fm.plain('### %s (%d items)\n' % (title, len(values)))
1534 lines = [
1535 'min',
1536 '10%',
1537 '25%',
1538 '50%',
1539 '75%',
1540 '80%',
1541 '85%',
1542 '90%',
1543 '95%',
1544 '99%',
1545 'max',
1546 ]
1547 for l in lines:
1548 fm.plain('%s: %s\n' % (l, stats[l]))
1549 fm.end()
1550
@command(b'perfhelper-mergecopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
          (b'', b'stats', False, b'provides statistic about the measured data'),
         ])
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # (column title, %-format for the matching data key) pairs
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # rename/time columns are only filled in when --timing is set
        output_template = [i for i in output_template
                           if not ('time' in i[0] or 'renames' in i[0])]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # samples accumulated for _displaystats; each item is
        # (value, base-hex, parent-hex[, parent2-hex])
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge revisions are relevant for (base, p1, p2) triplets
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append((
                        data['p1.nbrevs'],
                        b.hex(),
                        p1.hex()
                    ))
                    alldata['nbmissingfiles'].append((
                        data['p1.nbmissingfiles'],
                        b.hex(),
                        p1.hex()
                    ))
                if p2missing:
                    alldata['nbrevs'].append((
                        data['p2.nbrevs'],
                        b.hex(),
                        p2.hex()
                    ))
                    alldata['nbmissingfiles'].append((
                        data['p2.nbmissingfiles'],
                        b.hex(),
                        p2.hex()
                    ))
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                # fixed: take the end timestamp *before* computing the delta;
                # the previous code read the stale `end` from the p1 timing,
                # yielding a bogus (possibly negative) p2.time.
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

            if dostats:
                if p1missing:
                    alldata['parentnbrenames'].append((
                        data['p1.renamedfiles'],
                        b.hex(),
                        p1.hex()
                    ))
                    alldata['parenttime'].append((
                        data['p1.time'],
                        b.hex(),
                        p1.hex()
                    ))
                if p2missing:
                    alldata['parentnbrenames'].append((
                        data['p2.renamedfiles'],
                        b.hex(),
                        p2.hex()
                    ))
                    alldata['parenttime'].append((
                        data['p2.time'],
                        b.hex(),
                        p2.hex()
                    ))
                if p1missing or p2missing:
                    alldata['totalnbrenames'].append((
                        data['nbrenamedfiles'],
                        b.hex(),
                        p1.hex(),
                        p2.hex()
                    ))
                    alldata['totaltime'].append((
                        data['time'],
                        b.hex(),
                        p1.hex(),
                        p2.hex()
                    ))
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('parentnbrenames',
                            'rename from one parent to base'))
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
1742
1609 1743
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
         ])
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for the `perfpathcopies`
    command

    This command find source-destination pair relevant for copytracing
    testing. It reports values for some of the parameters that impact copy
    tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # only merge revisions provide (base, parent) pairs worth measuring
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # no file to trace copies for: uninteresting pair
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
1685 1819
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def build():
        return scmutil.casecollisionauditor(ui, False, repo.dirstate)
    timer(build)
    fm.end()
1692 1826
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    def d():
        s.fncache._load()
    timer(d)
    fm.end()
1702 1836
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache to disk

    Takes the repo lock, loads the fncache, opens a transaction with a
    backup of the fncache file, then times marking the cache dirty and
    rewriting it.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    # original code leaked the lock and left the transaction open when
    # timer(d) raised; release both on all paths.
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        try:
            tr.addbackup(b'fncache')
            def d():
                # force a full rewrite on every run
                s.fncache._dirty = True
                s.fncache.write(tr)
            timer(d)
            tr.close()
        finally:
            # no-op after a successful close; rolls back otherwise
            tr.release()
    finally:
        lock.release()
    fm.end()
1719 1853
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()
    def run():
        encode = store.encode
        for path in store.fncache.entries:
            encode(path)
    timer(run)
    fm.end()
1731 1865
def _bdiffworker(q, blocks, xdiff, ready, done):
    # Worker loop for the threaded mode of perfbdiff: pull text pairs off
    # the queue until a None sentinel is seen, then park on the ``ready``
    # condition so the caller can release all workers at once for the next
    # timed round. Exits when ``done`` is set.
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            # same diff-engine selection as the unthreaded path in perfbdiff
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()
1747 1881
def _manifestrevision(repo, mnode):
    """Return the raw manifest revision text for node ``mnode``.

    Works with both modern manifestlog objects (exposing ``getstorage``)
    and older ones exposing ``_revlog`` directly.
    """
    ml = repo.manifestlog
    if util.safehasattr(ml, b'getstorage'):
        storage = ml.getstorage(b'')
    else:
        storage = ml._revlog
    return storage.revision(mnode)
1757 1891
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        # --alldata implies walking the changelog
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the positional argument is the revision, not a file
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    # (old text, new text) pairs diffed during the timed run
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            # default: diff each revision against its delta parent
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        # threaded mode: prime the queue with one None sentinel per worker
        # so each _bdiffworker drains to its sentinel and parks on ``ready``
        # before timing starts
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            # feed all pairs plus one sentinel per worker, wake the parked
            # workers, and wait for the queue to drain
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # shut the workers down: set ``done``, feed sentinels, and wake them
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1858 1992
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        # --alldata implies walking the changelog
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the positional argument is the revision, not a file
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    # (old text, new text) pairs diffed during the timed run
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            # default: diff each revision against its delta parent
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1924 2058
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # map each single-letter whitespace flag to the diff option it enables
    flagnames = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    # time the diff with no flags, each flag alone, and the 'wB' combination
    for letters in ('', 'w', 'b', 'B', 'wB'):
        diffopts = dict((flagnames[letter], b'1') for letter in letters)
        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **diffopts)
            ui.popbuffer()
        flags = letters.encode('ascii')
        if flags:
            title = b'diffopts: %s' % (b'-' + flags)
        else:
            title = b'diffopts: none'
        timer(d, title=title)
    fm.end()
1946 2080
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener') # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # revlog version lives in the low 16 bits of the first 4 bytes;
    # bit 16 of the header flags marks an inline revlog
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    # sample nodes at 0%, 25%, 50%, 75% and 100% of the revlog
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        # time instantiating a revlog object from the opener
        revlog.revlog(opener, indexfile)

    def read():
        # time raw index file I/O only
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2064 2198
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def read():
        rl.clearcaches()

        step = opts[b'dist']
        if reverse:
            first, stop = rllen - 1, startrev - 1
            step = -1 * step
        else:
            first, stop = startrev, rllen

        for x in _xrange(first, stop, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(x))

    timer, fm = gettimer(ui, opts)
    timer(read)
    fm.end()
2106 2240
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
          ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
    (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    # _byteskwargs turned all option keys into bytes, so index with b'' keys
    # (native-str keys would KeyError on Python 3)
    lazydeltabase = opts[b'lazydeltabase']
    source = opts[b'source']
    clearcaches = opts[b'clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort(b'invalid source type: %s' % source)

    ### actually gather results
    count = opts[b'count']
    if count <= 0:
        # was misspelled 'invalide run count'
        raise error.Abort(b'invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    # each entry becomes (rev, [timing-from-run-1, timing-from-run-2, ...])
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts[b'details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # the 50th percentile was previously computed with a 70 multiplier
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
2224 2358
2225 2359 class _faketr(object):
2226 2360 def add(s, x, y, z=None):
2227 2361 return None
2228 2362
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    """Replay revisions [startrev, stoprev] of ``orig`` into a temporary
    revlog, timing each ``addrawrevision`` call.

    ``source`` selects how each revision is fed (see perfrevlogwrite help).
    Returns a list of ``(rev, timing)`` pairs, one per revision added.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            def updateprogress(pos):
                progress.update(pos)
            def completeprogress():
                progress.complete()
        else:
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            # only the addrawrevision call itself is timed
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
2265 2399
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``(args, kwargs)`` for replaying ``rev`` of ``orig``
    through ``addrawrevision``.

    ``source`` selects what is fed to the destination revlog: the full
    text, a delta against one of the parents, or the stored delta.
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    text = None
    cachedelta = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second one
        base = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(base), orig.revdiff(base, rev))
    elif source == b'parent-smallest':
        p1diff = orig.revdiff(p1, rev)
        base = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                base = p2
                diff = p2diff
        cachedelta = (orig.rev(base), diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2304 2438
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a writable copy of revlog ``orig`` truncated before
    ``truncaterev``.

    The index and data files are copied into a temporary directory and cut
    back so revisions >= ``truncaterev`` can be re-added for benchmarking.
    The temporary directory is removed on exit.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        # forward newer revlog constructor arguments when the source has them
        revlogkwargs[k] = getattr(orig, k)

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname, **revlogkwargs)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
2355 2489
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # no explicit list: default to every engine that is available and
        # actually able to compress
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # open a raw file handle on the revlog payload
        # (data lives in the index file for inline revlogs)
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # like doread, but reuse a single file descriptor
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # one segment read covering the whole revision range
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        # read + decompress, one revision at a time
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        # recompress the chunks captured by dochunkbatch with ``compressor``
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2473 2607
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the positional argument is the revision, not a file
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # slice each segment's raw bytes into one chunk per revision
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with the data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved between modules across Mercurial versions
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute the inputs for each phase once, outside the timed runs
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2609 2743
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if need to evaluate the impact of build volatile
    revisions set cache on the revset execution. Volatile cache hold filtered
    and obsolete related cache."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def d():
        if clear:
            # drop the filtered/obsolete volatile sets so their rebuild
            # cost is included in each timed run
            repo.invalidatevolatilesets()
        if contexts:
            # materialize a full changectx object for every revision
            for ctx in repo.set(expr): pass
        else:
            # plain revision numbers only (cheaper path)
            for r in repo.revs(expr): pass
    timer(d)
    fm.end()
2632 2766
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def mktimefunc(compute, setname):
        """build a benchmark callable computing `compute(repo, setname)` cold

        Volatile sets (and optionally the obsstore) are dropped first so
        each run pays the full computation cost.
        """
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            compute(repo, setname)
        return d

    def selected(candidates):
        """sort `candidates` and restrict them to the requested `names`"""
        picked = sorted(candidates)
        if names:
            picked = [n for n in picked if n in names]
        return picked

    # obsolescence-related sets first ...
    for name in selected(obsolete.cachefuncs):
        timer(mktimefunc(obsolete.getrevs, name), title=name)

    # ... then repoview filter sets
    for name in selected(repoview.filtertable):
        timer(mktimefunc(repoview.filterrevs, name), title=name)
    fm.end()
2674 2808
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
          ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            # modern layout: one cache object holding a per-filter mapping
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap: measure a build from scratch
                view._branchcaches.clear()
            else:
                # drop only this filter's entry: measure an incremental
                # update on top of the (pre-warmed) subset caches
                filtered.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick any filter whose subset is not still pending, so that
        # `allfilters` ends up ordered smaller-subset-first
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk reading of the branchmap so only the in-memory
    # computation is timed (restored in the `finally` below)
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    # likewise disable writing the result back to disk
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2753 2887
@command(b'perfbranchmapupdate', [
    (b'', b'base', [], b'subset of revision to start from'),
    (b'', b'target', [], b'subset of revision to end with'),
    (b'', b'clear-caches', False, b'clear cache between each runs')
    ] + formatteropts)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None] # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the revisions the timed update will have to process
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    # ad-hoc repoview filters exposing exactly the base / target subsets
    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # temporarily register the two ad-hoc filters; removed in `finally`
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                # bring the candidate up to the base state before timing
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset were found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

    def setup():
            # each run starts from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2857 2991
@command(b'perfbranchmapload', [
    (b'f', b'filter', b'', b'Specify repoview filter'),
    (b'', b'list', False, b'List branchmap filter caches'),
    (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),

    ] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap

    With --list, only print the available on-disk branchmap caches and
    their size. Otherwise time parsing the cache file for the requested
    repoview filter (falling back to the nearest cached subset).
    """
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # cache files are named `branch2` (unfiltered) or `branch2-<filter>`
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap() # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        # walk toward broader subsets until one has an on-disk cache
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            # also pay the changelog-reload cost on every run
            clearchangelog(repo)
    def bench():
        fromfile(repo)
    timer(bench, setup=setup)
    fm.end()
2908 3042
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)
    def d():
        # re-instantiate the obsstore so the marker file is parsed anew;
        # the marker count is the reported result
        return len(obsolete.obsstore(svfs))
    timer(d)
    fm.end()
2918 3052
@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
     (b'', b'mincost', 0, b'smallest cost of items in cache'),
     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
     (b'', b'size', 4, b'size of cache'),
     (b'', b'gets', 10000, b'number of key lookups'),
     (b'', b'sets', 10000, b'number of key sets'),
     (b'', b'mixed', 10000, b'number of mixed mode operations'),
     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    """benchmark `util.lrucachedict`: init, get, insert/set and mixed modes

    When --costlimit is non-zero, the cost-aware variants of the
    benchmarks are run instead of the plain ones.
    """
    opts = _byteskwargs(opts)

    def doinit():
        # raw construction cost of the cache object
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    # one random key per cache slot
    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        # NOTE: closes over `costs`, which is only assigned further down in
        # this function; that is fine because it is called after setup.
        # assumes size <= sets, otherwise costs[i] would be out of range —
        # TODO confirm for non-default option values
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        # __setitem__ path, as opposed to the explicit insert() API above
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0  # lookup
        else:
            op = 1  # store

        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # pick the cost-aware or plain variants depending on --costlimit
    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3049 3183
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def write():
        # 100000 small writes of the same constant message
        msg = b'Testing write performance\n'
        for _count in range(100000):
            ui.write(msg)
    timer(write)
    fm.end()
3062 3196
def uisetup(ui):
    """extension setup hook: patch `cmdutil.openrevlog` on old Mercurial

    Only active when `openrevlog` exists but `debugrevlogopts` does not,
    i.e. on historical versions that would silently mishandle --dir.
    """
    if (util.safehasattr(cmdutil, b'openrevlog') and
        not util.safehasattr(commands, b'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            # reject --dir explicitly instead of letting it misbehave
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(b"This version doesn't support --dir option",
                                  hint=b"use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3077 3211
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def bench():
        # drive one full progress bar from 0 up to `total`
        with ui.makeprogress(topic, total=total) as progress:
            for _step in _xrange(total):
                progress.increment()

    timer(bench)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now