##// END OF EJS Templates
perf: handle NameError for `pycompat.foo` when pycompat wasn't imported...
Martin von Zweigbergk -
r43051:c8d3af9c default
parent child Browse files
Show More
@@ -1,3092 +1,3092
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
12 12 worst, median average. If not set only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
16 16 number of seconds to wait before any group of runs (default: 1)
17 17
18 18 ``pre-run``
19 19 number of runs to perform before starting measurement.
20 20
21 21 ``profile-benchmark``
22 22 Enable profiling for the benchmarked section.
23 23 (The first iteration is benchmarked)
24 24
25 25 ``run-limits``
26 26 Control the number of runs each benchmark will perform. The option value
27 27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 28 conditions are considered in order with the following logic:
29 29
30 30 If benchmark has been running for <time> seconds, and we have performed
31 31 <numberofrun> iterations, stop the benchmark,
32 32
33 33 The default value is: `3.0-100, 10.0-3`
34 34
35 35 ``stub``
36 36 When set, benchmarks will only be run once, useful for testing
37 37 (default: off)
38 38 '''
39 39
40 40 # "historical portability" policy of perf.py:
41 41 #
42 42 # We have to do:
43 43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 46 # - make historical perf command work correctly with as wide Mercurial
47 47 # version as possible
48 48 #
49 49 # We have to do, if possible with reasonable cost:
50 50 # - make recent perf command for historical feature work correctly
51 51 # with early Mercurial
52 52 #
53 53 # We don't have to do:
54 54 # - make perf command for recent feature work correctly with early
55 55 # Mercurial
56 56
57 57 from __future__ import absolute_import
58 58 import contextlib
59 59 import functools
60 60 import gc
61 61 import os
62 62 import random
63 63 import shutil
64 64 import struct
65 65 import sys
66 66 import tempfile
67 67 import threading
68 68 import time
69 69 from mercurial import (
70 70 changegroup,
71 71 cmdutil,
72 72 commands,
73 73 copies,
74 74 error,
75 75 extensions,
76 76 hg,
77 77 mdiff,
78 78 merge,
79 79 revlog,
80 80 util,
81 81 )
82 82
83 83 # for "historical portability":
84 84 # try to import modules separately (in dict order), and ignore
85 85 # failure, because these aren't available with early Mercurial
86 86 try:
87 87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 88 except ImportError:
89 89 pass
90 90 try:
91 91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 92 except ImportError:
93 93 pass
94 94 try:
95 95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 96 dir(registrar) # forcibly load it
97 97 except ImportError:
98 98 registrar = None
99 99 try:
100 100 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 101 except ImportError:
102 102 pass
103 103 try:
104 104 from mercurial.utils import repoviewutil # since 5.0
105 105 except ImportError:
106 106 repoviewutil = None
107 107 try:
108 108 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 109 except ImportError:
110 110 pass
111 111 try:
112 112 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 113 except ImportError:
114 114 pass
115 115
116 116 try:
117 117 from mercurial import profiling
118 118 except ImportError:
119 119 profiling = None
120 120
def identity(a):
    # no-op fallback used in place of the pycompat helpers below when
    # running under an old Mercurial / plain Python 2
    return a
123 123
124 124 try:
125 125 from mercurial import pycompat
126 126 getargspec = pycompat.getargspec # added to module after 4.5
127 127 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
128 128 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
129 129 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
130 130 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
131 131 if pycompat.ispy3:
132 132 _maxint = sys.maxsize # per py3 docs for replacing maxint
133 133 else:
134 134 _maxint = sys.maxint
135 except (ImportError, AttributeError):
135 except (NameError, ImportError, AttributeError):
136 136 import inspect
137 137 getargspec = inspect.getargspec
138 138 _byteskwargs = identity
139 139 fsencode = identity # no py3 support
140 140 _maxint = sys.maxint # no py3 support
141 141 _sysstr = lambda x: x # no py3 support
142 142 _xrange = xrange
143 143
144 144 try:
145 145 # 4.7+
146 146 queue = pycompat.queue.Queue
147 except (AttributeError, ImportError):
147 except (NameError, AttributeError, ImportError):
148 148 # <4.7.
149 149 try:
150 150 queue = pycompat.queue
151 except (AttributeError, ImportError):
151 except (NameError, AttributeError, ImportError):
152 152 queue = util.queue
153 153
154 154 try:
155 155 from mercurial import logcmdutil
156 156 makelogtemplater = logcmdutil.maketemplater
157 157 except (AttributeError, ImportError):
158 158 try:
159 159 makelogtemplater = cmdutil.makelogtemplater
160 160 except (AttributeError, ImportError):
161 161 makelogtemplater = None
162 162
163 163 # for "historical portability":
164 164 # define util.safehasattr forcibly, because util.safehasattr has been
165 165 # available since 1.9.3 (or 94b200a11cf7)
# private sentinel: lets us distinguish "attribute missing" from an
# attribute whose stored value is falsy or None
_undefined = object()
def safehasattr(thing, attr):
    # hasattr()-alike that never raises and honors the sentinel above
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)
170 170
171 171 # for "historical portability":
172 172 # define util.timer forcibly, because util.timer has been available
173 173 # since ae5d60bb70c9
174 174 if safehasattr(time, 'perf_counter'):
175 175 util.timer = time.perf_counter
176 176 elif os.name == b'nt':
177 177 util.timer = time.clock
178 178 else:
179 179 util.timer = time.time
180 180
181 181 # for "historical portability":
182 182 # use locally defined empty option list, if formatteropts isn't
183 183 # available, because commands.formatteropts has been available since
184 184 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
185 185 # available since 2.2 (or ae5f92e154d3)
186 186 formatteropts = getattr(cmdutil, "formatteropts",
187 187 getattr(commands, "formatteropts", []))
188 188
189 189 # for "historical portability":
190 190 # use locally defined option list, if debugrevlogopts isn't available,
191 191 # because commands.debugrevlogopts has been available since 3.7 (or
192 192 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
193 193 # since 1.9 (or a79fea6b3e77).
194 194 revlogopts = getattr(cmdutil, "debugrevlogopts",
195 195 getattr(commands, "debugrevlogopts", [
196 196 (b'c', b'changelog', False, (b'open changelog')),
197 197 (b'm', b'manifest', False, (b'open manifest')),
198 198 (b'', b'dir', False, (b'open directory manifest')),
199 199 ]))
200 200
201 201 cmdtable = {}
202 202
203 203 # for "historical portability":
204 204 # define parsealiases locally, because cmdutil.parsealiases has been
205 205 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    # b"name|alias1|alias2" -> [b"name", b"alias1", b"alias2"]
    return cmd.split(b"|")
208 208
209 209 if safehasattr(registrar, 'command'):
210 210 command = registrar.command(cmdtable)
211 211 elif safehasattr(cmdutil, 'command'):
212 212 command = cmdutil.command(cmdtable)
213 213 if b'norepo' not in getargspec(command).args:
214 214 # for "historical portability":
215 215 # wrap original cmdutil.command, because "norepo" option has
216 216 # been available since 3.1 (or 75a96326cecb)
217 217 _command = command
218 218 def command(name, options=(), synopsis=None, norepo=False):
219 219 if norepo:
220 220 commands.norepo += b' %s' % b' '.join(parsealiases(name))
221 221 return _command(name, list(options), synopsis)
222 222 else:
223 223 # for "historical portability":
224 224 # define "@command" annotation locally, because cmdutil.command
225 225 # has been available since 1.9 (or 2daa5179e73f)
226 226 def command(name, options=(), synopsis=None, norepo=False):
227 227 def decorator(func):
228 228 if synopsis:
229 229 cmdtable[name] = func, list(options), synopsis
230 230 else:
231 231 cmdtable[name] = func, list(options)
232 232 if norepo:
233 233 commands.norepo += b' %s' % b' '.join(parsealiases(name))
234 234 return func
235 235 return decorator
236 236
237 237 try:
238 238 import mercurial.registrar
239 239 import mercurial.configitems
240 240 configtable = {}
241 241 configitem = mercurial.registrar.configitem(configtable)
242 242 configitem(b'perf', b'presleep',
243 243 default=mercurial.configitems.dynamicdefault,
244 244 experimental=True,
245 245 )
246 246 configitem(b'perf', b'stub',
247 247 default=mercurial.configitems.dynamicdefault,
248 248 experimental=True,
249 249 )
250 250 configitem(b'perf', b'parentscount',
251 251 default=mercurial.configitems.dynamicdefault,
252 252 experimental=True,
253 253 )
254 254 configitem(b'perf', b'all-timing',
255 255 default=mercurial.configitems.dynamicdefault,
256 256 experimental=True,
257 257 )
258 258 configitem(b'perf', b'pre-run',
259 259 default=mercurial.configitems.dynamicdefault,
260 260 )
261 261 configitem(b'perf', b'profile-benchmark',
262 262 default=mercurial.configitems.dynamicdefault,
263 263 )
264 264 configitem(b'perf', b'run-limits',
265 265 default=mercurial.configitems.dynamicdefault,
266 266 experimental=True,
267 267 )
268 268 except (ImportError, AttributeError):
269 269 pass
270 270 except TypeError:
271 271 # compatibility fix for a11fd395e83f
272 272 # hg version: 5.2
273 273 configitem(b'perf', b'presleep',
274 274 default=mercurial.configitems.dynamicdefault,
275 275 )
276 276 configitem(b'perf', b'stub',
277 277 default=mercurial.configitems.dynamicdefault,
278 278 )
279 279 configitem(b'perf', b'parentscount',
280 280 default=mercurial.configitems.dynamicdefault,
281 281 )
282 282 configitem(b'perf', b'all-timing',
283 283 default=mercurial.configitems.dynamicdefault,
284 284 )
285 285 configitem(b'perf', b'pre-run',
286 286 default=mercurial.configitems.dynamicdefault,
287 287 )
288 288 configitem(b'perf', b'profile-benchmark',
289 289 default=mercurial.configitems.dynamicdefault,
290 290 )
291 291 configitem(b'perf', b'run-limits',
292 292 default=mercurial.configitems.dynamicdefault,
293 293 )
294 294
def getlen(ui):
    """Return a length function.

    When the perf.stub config knob is set, return a function that always
    reports 1 so stub runs produce stable output; otherwise return the
    builtin ``len``.
    """
    stubbed = ui.configbool(b"perf", b"stub", False)
    if not stubbed:
        return len
    return lambda seq: 1
299 299
class noop(object):
    """Dummy context manager: does nothing on enter or exit."""
    def __enter__(self):
        return None
    def __exit__(self, *exc_info):
        return None

# shared no-op instance, used when profiling is disabled
NOOPCTX = noop()
308 308
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry is b"<seconds>-<minimum-run-count>"; malformed entries are
    # warned about and skipped rather than aborting the benchmark
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n'
                     % item))
            continue
        try:
            time_limit = float(pycompat.sysstr(parts[0]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (pycompat.bytestr(e), item)))
            continue
        try:
            run_limit = int(pycompat.sysstr(parts[1]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (pycompat.bytestr(e), item)))
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
                          prerun=prerun, profiler=profiler)
    return t, fm
408 408
def stub_timer(fm, func, setup=None, title=None):
    # single-run replacement for _timer when perf.stub is set; fm and
    # title are accepted only for signature compatibility
    if setup is not None:
        setup()
    func()
413 413
@contextlib.contextmanager
def timeone():
    # Yields a one-element list which, after the with-block finishes,
    # receives a (wall, user-cpu, system-cpu) tuple for the block's body.
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    # os.times(): index 0 is user time, index 1 is system time
    r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
424 424
425 425
# list of stop condition (elapsed time, minimal run count)
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)

def _timer(fm, func, setup=None, title=None, displayall=False,
           limits=DEFAULTLIMITS, prerun=0, profiler=None):
    # collect garbage up front so pending collections aren't charged to
    # the benchmarked function
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # warm-up iterations, never measured
    for i in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # only the first measured iteration is profiled
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)
464 464
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timings through formatter ``fm``.

    ``timings`` is a list of (wall, comb-user, comb-sys) tuples; it is
    sorted in place. Only the best run is reported unless ``displayall``
    is set, in which case max, average and median are shown too.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)
    def display(role, entry):
        # non-"best" roles get a "role." prefix on each field name
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')
    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
496 496
497 497 # utilities for historical portability
498 498
def getint(ui, section, name, default):
    """Read an integer config value, falling back to ``default`` if unset.

    Raises error.ConfigError when the value is present but not an integer.
    """
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, raw))
510 510
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    # capture the current value now so restore() can put it back later
    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
540 540
541 541 # utilities to examine each internal API changes
542 542
def getbranchmapsubsettable():
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    # probe each candidate module in turn and use the first that has it
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
559 559
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf);
    # earlier versions expose the equivalent as repo.sopener
    accessor = getattr(repo, 'svfs', None)
    return accessor if accessor else getattr(repo, 'sopener')
570 570
def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf);
    # earlier versions expose the equivalent as repo.opener
    accessor = getattr(repo, 'vfs', None)
    return accessor if accessor else getattr(repo, 'opener')
581 581
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
610 610
611 611 # utilities to clear cache
612 612
def clearfilecache(obj, attrname):
    # Drop a cached property so its next access recomputes it; the cache
    # state lives on the unfiltered repo when obj supports unfiltered().
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    # also forget the filecache bookkeeping entry, if any
    obj._filecache.pop(attrname, None)
620 620
def clearchangelog(repo):
    # filtered repos keep per-view changelog cache slots; reset them so
    # that clearing the unfiltered filecache below actually takes effect
    if repo is not repo.unfiltered():
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
626 626
627 627 # perf commands
628 628
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark a full dirstate.walk over the working directory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()
637 637
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file ``f`` at the working copy parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()
645 645
@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    """benchmark computing repo.status()"""
    opts = _byteskwargs(opts)
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
657 657
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the working copy"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # capture the quiet flag *before* entering the try block so the
    # finally clause can always restore it (previously a failure during
    # the read would have raised NameError in the finally)
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        # dry_run keeps the benchmark from mutating the repository
        opts[b'dry_run'] = True
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            # since 5.0, addremove takes an explicit uipathfn argument
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
675 675
def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # older revlogs: reset the node->rev lookup cache by hand
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
684 684
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def s():
        # drop changelog caches so every run recomputes from scratch
        clearcaches(cl)
    def d():
        len(cl.headrevs())
    timer(d, setup=s)
    fm.end()
697 697
@command(b'perftags', formatteropts+
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perftags(ui, repo, **opts):
    """benchmark computing repo.tags()"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def s():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        # always reset the tags cache itself between runs
        repocleartagscache()
    def t():
        return len(repo.tags())
    timer(t, setup=s)
    fm.end()
716 716
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating all ancestors of the changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        for a in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()
727 727
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests of REVSET revs against the ancestor set"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            # membership test only; the result is intentionally discarded
            rev in s
    timer(d)
    fm.end()
740 740
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # convert kwargs keys to bytes, consistently with every other perf
    # command (previously missing here, leaking str keys on Python 3)
    opts = _byteskwargs(opts)
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # recreate the peer before each run so connection state is fresh
        repos[1] = hg.peer(ui, opts, path)
    def d():
        setdiscovery.findcommonheads(ui, *repos)
    timer(d, setup=s)
    fm.end()
755 755
@command(b'perfbookmarks', formatteropts +
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def s():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')
    def d():
        # attribute access repopulates the _bookmarks cache cleared above
        repo._bookmarks
    timer(d, setup=s)
    fm.end()
774 774
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # benchmark factories: each returns a zero-argument callable that
    # re-opens and re-reads the bundle from scratch

    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # raw file reads, bypassing bundle decoding entirely
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # sniff the bundle's format once to pick format-specific benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
892 892
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # drain the chunk generator; generation is the measured work
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
923 923
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark dirstate.hasdir(), rebuilding the directory map each run"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # force the dirstate to load before the timed section
    b'a' in dirstate
    def d():
        dirstate.hasdir(b'a')
        # drop the cached directory map so the next run recomputes it
        del dirstate._map._dirs
    timer(d)
    fm.end()
935 935
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark a full dirstate load (invalidate + membership test)"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate once outside the timed section
    b"a" in repo.dirstate
    def d():
        # invalidate so each run measures a cold reload
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(d)
    fm.end()
946 946
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark dirstate.hasdir() with the directory map dropped each run"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate once outside the timed section
    b"a" in repo.dirstate
    def d():
        repo.dirstate.hasdir(b"a")
        # drop the cached directory map so each run rebuilds it
        del repo.dirstate._map._dirs
    timer(d)
    fm.end()
957 957
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark building the dirstate file-name case-folding map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate once outside the timed section
    b'a' in dirstate
    def d():
        dirstate._map.filefoldmap.get(b'a')
        # drop the cached fold map so each run rebuilds it
        del dirstate._map.filefoldmap
    timer(d)
    fm.end()
969 969
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark building the dirstate directory case-folding map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate once outside the timed section
    b'a' in dirstate
    def d():
        dirstate._map.dirfoldmap.get(b'a')
        # drop both caches: dirfoldmap is derived from the _dirs map
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(d)
    fm.end()
982 982
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark writing the dirstate back to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # prime the dirstate once outside the timed section
    b"a" in ds
    def d():
        # mark dirty so write() actually serializes instead of no-op'ing
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()
994 994
def _getmergerevs(repo, opts):
    """parse command arguments to return the revisions involved in a merge

    input: options dictionary with `rev`, `from` and `base` keys
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        # no explicit base: use the common ancestor of the two sides
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1016 1016
@command(b'perfmergecalculate',
         [
             (b'r', b'rev', b'.', b'rev to merge against'),
             (b'', b'from', b'', b'rev to merge from'),
             (b'', b'base', b'', b'the revision to use as base'),
         ] + formatteropts)
def perfmergecalculate(ui, repo, **opts):
    """benchmark `merge.calculateupdates` between two revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()
1035 1035
@command(b'perfmergecopies',
         [
             (b'r', b'rev', b'.', b'rev to merge against'),
             (b'', b'from', b'', b'rev to merge from'),
             (b'', b'base', b'', b'the revision to use as base'),
         ] + formatteropts)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)
    timer(d)
    fm.end()
1053 1053
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def d():
        # trace file copies between the two revisions
        copies.pathcopies(ctx1, ctx2)
    timer(d)
    fm.end()
1065 1065
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
          ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def d():
        phases = _phases
        if full:
            # also measure re-reading the phaseroots file from disk
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()
1084 1084
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    # local imports keep the module loadable on Mercurial versions that
    # predate these APIs (see "historical portability" policy at file top)
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    # fetch the remote phase roots once; only the local analysis is timed
    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    # NOTE(review): iteritems() is a Python 2 idiom; presumably pycompat
    # handles this elsewhere -- confirm before running under Python 3
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
1140 1140
@command(b'perfmanifest', [
    (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
    (b'', b'clear-disk', False, b'clear on-disk caches too'),
] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset; resolve it to its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full 40-char hex node of the manifest itself
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # getstorage() appeared in hg 4.8; fall back for older versions
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        # clear caches so each run measures a cold read
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
1176 1176
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changelog entry by node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()
    def d():
        repo.changelog.read(n)
        #repo.changelog._cache = None
    timer(d)
    fm.end()
1187 1187
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # drop any cached ignore matcher so the run measures a cold build
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        # property access triggers parsing of all .hgignore files
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1204 1204
@command(b'perfindex', [
    (b'', b'rev', [], b'revision to be looked up (default tip)'),
    (b'', b'no-lookup', None, b'do not revision lookup post creation'),
] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # fix: _byteskwargs converted all keys to bytes, so the str keys
        # previously used here raised KeyError on Python 3; messages are
        # bytes for consistency with the rest of the file
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)
    timer(d, setup=setup)
    fm.end()
1258 1258
@command(b'perfnodemap', [
    (b'', b'rev', [], b'revision to be looked up (default tip)'),
    (b'', b'clear-caches', True, b'clear revlog cache between calls'),
] + formatteropts)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # fix: _byteskwargs converted all keys to bytes, so the str key used
    # here previously raised KeyError on Python 3
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        # bytes message for consistency with the rest of the file
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]
    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        def setup():
            setnodeget()
    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1317 1317
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark a bare `hg version` invocation (interpreter startup cost)"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        if os.name != r'nt':
            # empty HGRCPATH avoids measuring config-file parsing
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
        else:
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
    timer(d)
    fm.end()
1331 1331
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nl = [repo.changelog.node(i) for i in _xrange(count)]
    def d():
        for n in nl:
            repo.changelog.parents(n)
    timer(d)
    fm.end()
1355 1355
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of changeset `x`"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    def d():
        len(repo[x].files())
    timer(d)
    fm.end()
1365 1365
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw file list straight from the changelog"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        # index 3 of the parsed changelog entry is the file list
        len(cl.read(x)[3])
    timer(d)
    fm.end()
1376 1376
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision identifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()
1383 1383
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
          ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark replaying a fixed pseudo-random edit sequence into a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    numedits = opts[b'edits']
    hunkcap = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: the edit sequence is deterministic across runs
    random.seed(0)
    randint = random.randint
    nlines = 0
    edits = []
    for idx in _xrange(numedits):
        # the four randint() calls below must stay in this exact order so the
        # generated sequence matches previous versions of this benchmark
        lo = randint(0, nlines)
        hi = randint(lo, min(nlines, lo + hunkcap))
        newlo = randint(0, maxb1)
        newhi = randint(newlo, newlo + hunkcap)
        nlines += (newhi - newlo) - (hi - lo)
        edits.append((idx, lo, hi, newlo, newhi))

    def d():
        ll = linelog.linelog()
        for edit in edits:
            ll.replacelines(*edit)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1417 1417
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # bind once so the timed closure does not pay for the attribute lookup
    resolve = scmutil.revrange
    def d():
        return len(resolve(repo, specs))
    timer(d)
    fm.end()
1425 1425
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark a cold node->rev lookup on a freshly opened changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(n)
        # clear revlog caches so every iteration is a cold lookup
        clearcaches(cl)
    timer(d)
    fm.end()
1439 1439
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
          ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running `hg log` (output suppressed)"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # swallow log output so terminal I/O does not skew the measurement
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
                               copies=opts.get(b'rename')))
    ui.popbuffer()
    fm.end()
1453 1453
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        lastrev = len(repo) - 1
        for rev in repo.changelog.revs(start=lastrev, stop=-1):
            # branch() forces the changelog entry to be read, not just the index
            repo[rev].branch()
    timer(moonwalk)
    fm.end()
1468 1468
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
          ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render into a null ui so terminal output does not skew the timing
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1502 1502
@command(b'perfhelper-mergecopies', formatteropts +
         [
             (b'r', b'revs', [], b'restrict search to these revisions'),
             (b'', b'timing', False, b'provides extra data (costly)'),
         ])
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # drop the timing/rename columns when --timing is not requested
        output_template = [i for i in output_template
                           if not ('time' in i[0] or 'renames' in i[0])]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # only merge commits are interesting triplet sources
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                # fix: take the end timestamp *before* computing the delta;
                # previously p2.time was computed from the stale `end` of the
                # p1 measurement, making the reported p2 timing meaningless
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
1607 1607
@command(b'perfhelper-pathcopies', formatteropts +
         [
             (b'r', b'revs', [], b'restrict search to these revisions'),
             (b'', b'timing', False, b'provides extra data (costly)'),
         ])
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # only merge commits yield interesting base->parent pairs
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # no files to trace: pair is uninteresting, skip it
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                # make nodes pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
1683 1683
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor for the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
    fm.end()
1690 1690
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    def d():
        s.fncache._load()
    timer(d)
    fm.end()
1700 1700
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache file inside a transaction"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    # back up so the repeated rewrites can be rolled back cleanly
    tr.addbackup(b'fncache')
    def d():
        # mark dirty so write() actually serializes instead of no-op'ing
        s.fncache._dirty = True
        s.fncache.write(tr)
    timer(d)
    tr.close()
    lock.release()
    fm.end()
1717 1717
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark store-path encoding of every fncache entry"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    s.fncache._load()
    def d():
        for p in s.fncache.entries:
            s.encode(p)
    timer(d)
    fm.end()
1729 1729
def _bdiffworker(q, blocks, xdiff, ready, done):
    """Worker thread body for the threaded variant of `perfbdiff`.

    Pulls text pairs from queue `q` and diffs them with the algorithm
    selected by the `xdiff`/`blocks` flags. A `None` item marks the end of
    one batch; the worker then blocks on the `ready` condition until the
    driver wakes it for the next batch, and exits once `done` is set.
    """
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()
1745 1745
def _manifestrevision(repo, mnode):
    """Return the raw stored revision text for manifest node `mnode`."""
    ml = repo.manifestlog
    # getstorage() appeared in hg 4.8; older versions expose _revlog directly
    if util.safehasattr(ml, b'getstorage'):
        storage = ml.getstorage(b'')
    else:
        storage = ml._revlog
    return storage.revision(mnode)
1755 1755
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        # --alldata walks manifest and filelogs, which requires the changelog.
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # No file argument in this mode: the first positional is the revision.
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    # Gather all text pairs up front so only the diffing itself is timed.
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            # Default mode: diff each revision against its delta parent.
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        # Single-threaded: diff every pair inline in the timed function.
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        # Threaded: workers block on the queue; the initial None items plus
        # q.join() ensure all workers are started and idle before timing.
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            # Feed all pairs, then one None per worker as an end-of-batch
            # marker, wake the workers, and wait for the queue to drain.
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # Signal shutdown and wake workers so the threads exit cleanly.
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1856 1856
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        # --alldata walks manifest and filelogs, which requires the changelog.
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # No file argument in this mode: the first positional is the revision.
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    # Gather all (left, right) text pairs up front so only diffing is timed.
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            # Default mode: diff each revision against its delta parent.
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1922 1922
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Map each single-letter diff flag to the whitespace option it enables.
    flagnames = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    # Benchmark a plain diff, each flag on its own, and the w+B combination.
    for combo in ('', 'w', 'b', 'B', 'wB'):
        diffopts = {flagnames[flag]: b'1' for flag in combo}
        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **diffopts)
            ui.popbuffer()
        label = combo.encode('ascii')
        if label:
            title = b'diffopts: %s' % (b'-' + label)
        else:
            title = b'diffopts: none'
        timer(d, title=title)
    fm.end()
1944 1944
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener') # trick linter
    indexfile = rl.indexfile
    # Read the raw index once; all parsing benchmarks reuse this buffer.
    data = opener.read(indexfile)

    # First 4 bytes: flags (high 16 bits) and version (low 16 bits).
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    # Sample nodes at fixed fractions of the revlog for lookup benchmarks.
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    # Each benchmark gets its own timer/formatter so results are reported
    # per-operation.
    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2062 2062
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # Negative start revisions count backward from the end of the revlog.
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        # Drop all revlog caches so every run pays the full read cost.
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            # Walk from tip down to startrev with a negative stride.
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in _xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2104 2104
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
          ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # Negative boundaries count backward from the end of the revlog.
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    # After _byteskwargs, option keys are bytes; use b'' keys consistently
    # with the rest of this file (str keys break under Python 3).
    lazydeltabase = opts[b'lazydeltabase']
    source = opts[b'source']
    clearcaches = opts[b'clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort(b'invalid source type: %s' % source)

    ### actually gather results
    count = opts[b'count']
    if count <= 0:
        raise error.Abort(b'invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    # Transpose the per-pass [(rev, time), ...] lists into one
    # [(rev, [time-per-pass, ...])] list.
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts[b'details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # fixed: the median row previously indexed the 70th percentile
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
2222 2222
2223 2223 class _faketr(object):
2224 2224 def add(s, x, y, z=None):
2225 2225 return None
2226 2226
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    # One full pass of re-adding revisions [startrev, stoprev] of ``orig``
    # into a truncated temporary copy, timing each addrawrevision call.
    # Returns a list of (rev, timing) tuples.
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            def updateprogress(pos):
                progress.update(pos)
            def completeprogress():
                progress.complete()
        else:
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            # Build the addrawrevision arguments outside the timed section.
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            # Only the actual write is timed.
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
2263 2263
def _getrevisionseed(orig, rev, tr, source):
    # Build the (args, kwargs) for ``addrawrevision`` to re-add revision
    # ``rev`` of revlog ``orig``, feeding it either a full text or a cached
    # delta depending on ``source`` (see perfrevlogwrite's docstring).
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        # Provide the fulltext; no precomputed delta.
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # Delta against p2, falling back to p1 when there is no p2.
        parent = p2
        if p2 == nullid:
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # Compute both parent deltas and keep the shorter one.
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        # Reuse the delta base already chosen by the storage layer.
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2302 2302
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    # Context manager yielding a writable copy of revlog ``orig`` truncated
    # to just before ``truncaterev``, living in a temporary directory that
    # is removed on exit.
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    # Forward the upperboundcomp setting when this revlog version has it.
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # Index entries are fixed-size, so the cut point is rev * size.
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname, **revlogkwargs)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
2353 2353
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # No explicit engines: benchmark every engine that is available and
        # actually able to compress.
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # Open a raw file handle on the revlog's backing file (index file
        # for inline revlogs, data file otherwise).
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        # Re-compress the chunks captured by dochunkbatch with ``compressor``.
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2471 2471
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # No file argument in this mode: the first positional is the revision.
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # Slice each on-disk segment in ``data`` into one raw (still
        # compressed) chunk per revision of the corresponding chain slice.
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # Inline revlogs interleave index entries with data.
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved into revlogutils.deltas; fall back for older versions.
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # Precompute the intermediate artifacts (chain, segments, raw chunks,
    # patched fulltext) once, so each benchmark exercises a single phase.
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    # Slicing only exists on revlogs with sparse read support.
    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2607 2607
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option to evaluate the impact of building the volatile
    revision set caches on revset execution. Volatile caches hold the
    filtered and obsolescence related data."""
    # NOTE: the help text previously referenced a nonexistent ``--clean``
    # option; the flag is ``-C/--clear``.
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def d():
        if clear:
            # Drop volatile caches so each run pays the rebuild cost.
            repo.invalidatevolatilesets()
        if contexts:
            # Materialize a changectx for every matched revision.
            for ctx in repo.set(expr): pass
        else:
            # Only iterate the revision numbers.
            for r in repo.revs(expr): pass
    timer(d)
    fm.end()
2630 2630
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def makebench(compute, setname):
        # Build a benchmark closure: each run invalidates the volatile sets
        # (and optionally drops the obsstore file cache) before recomputing
        # the named set via ``compute``.
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            compute(repo, setname)
        return d

    # Obsolescence-related sets, optionally restricted to the given names.
    obsnames = sorted(obsolete.cachefuncs)
    if names:
        obsnames = [n for n in obsnames if n in names]
    for name in obsnames:
        timer(makebench(obsolete.getrevs, name), title=name)

    # Repoview filter sets, with the same optional name restriction.
    filternames = sorted(repoview.filtertable)
    if names:
        filternames = [n for n in filternames if n in names]
    for name in filternames:
        timer(makebench(repoview.filterrevs, name), title=name)
    fm.end()
2672 2672
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
          ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            # `None` stands for the unfiltered repo
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions: the per-filter mapping is `_branchcaches` itself
            filtered = view._branchcaches
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop all cached branchmaps so subsets get rebuilt too
                view._branchcaches.clear()
            else:
                # only drop the entry being benchmarked
                filtered.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset is not still pending, so `allfilters`
        # ends up ordered from smaller to bigger subsets; the for/else
        # asserts the subset relation has no cycle
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk read/write so only in-memory computation is timed
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        # always restore the patched read/write entry points
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2751 2751
@command(b'perfbranchmapupdate', [
     (b'', b'base', [], b'subset of revision to start from'),
     (b'', b'target', [], b'subset of revision to end with'),
     (b'', b'clear-caches', False, b'clear cache between each runs')
    ] + formatteropts)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None] # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    # filter functions hiding everything outside the base/target sets
    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # temporarily register the two synthetic filters in the global table
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset were found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # reset to a fresh copy of the precomputed base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        # always drop the synthetic filters from the global table
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2855 2855
@command(b'perfbranchmapload', [
     (b'f', b'filter', b'', b'Specify repoview filter'),
     (b'', b'list', False, b'List branchmap filter caches'),
     (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),

    ] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap

    With --list, only print the on-disk branchmap cache files and their
    sizes, without benchmarking anything.

    NOTE: `filter` and `list` shadow builtins, but they are dictated by the
    command option names and cannot be renamed.
    """
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap() # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached; walk up the
    # subset chain until a filter with an on-disk cache is found
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        fromfile(repo)
    timer(bench, setup=setup)
    fm.end()
2906 2906
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    storevfs = getsvfs(repo)

    def countmarkers():
        # instantiating obsstore parses the on-disk markers
        return len(obsolete.obsstore(storevfs))

    timer(countmarkers)
    fm.end()
2916 2916
@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
     (b'', b'mincost', 0, b'smallest cost of items in cache'),
     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
     (b'', b'size', 4, b'size of cache'),
     (b'', b'gets', 10000, b'number of key lookups'),
     (b'', b'sets', 10000, b'number of key sets'),
     (b'', b'mixed', 10000, b'number of mixed mode operations'),
     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    """benchmark `util.lrucachedict` operations

    Times cache construction, gets, inserts/sets and a randomized mix of
    both; when --costlimit is non-zero the cost-aware variants are used.
    """
    opts = _byteskwargs(opts)

    def doinit():
        # cost of constructing the cache object itself
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        # NOTE: `costs` is populated below (set mode section), but only read
        # when this closure actually runs, after that population.
        # assumes sets >= size so costs[i] exists for every value -- TODO confirm
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                # entries may have been evicted to respect the cost limit
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        # op 0 is a get, op 1 is a set, chosen per --mixedgetfreq
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # pick the cost-aware benchmark set only when a cost limit was requested
    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3047 3047
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def bench():
        # 100k short writes; each iteration pays the full ui.write() cost
        for _ in range(100000):
            ui.write(b'Testing write performance\n')

    timer(bench)
    fm.end()
3060 3060
def uisetup(ui):
    """extension setup hook: patch cmdutil.openrevlog on old Mercurial

    For "historical portability": Mercurial 1.9 (or a79fea6b3e77) through
    3.7 (or 5606f7d0d063) have cmdutil.openrevlog() but lack
    commands.debugrevlogopts; there, the '--dir' option (only available
    since 3.5, or 49c583ca48c4) must cause an explicit failure instead of
    being silently ignored.
    """
    hasopenrevlog = util.safehasattr(cmdutil, b'openrevlog')
    hasrevlogopts = util.safehasattr(commands, b'debugrevlogopts')
    if not hasopenrevlog or hasrevlogopts:
        return

    def openrevlog(orig, repo, cmd, file_, opts):
        if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
            raise error.Abort(b"This version doesn't support --dir option",
                              hint=b"use 3.5 or later")
        return orig(repo, cmd, file_, opts)

    extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3075 3075
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        with ui.makeprogress(topic, total=total) as progress:
            # use the module-level `_xrange` portability alias (as the rest
            # of this file does, e.g. perflrucachedict) instead of
            # `pycompat.xrange`: `pycompat` may not be importable on the old
            # Mercurial versions this script must stay loadable on, which
            # would make this raise NameError ("historical portability").
            for i in _xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now