##// END OF EJS Templates
perf: add a perfignore command...
Boris Feld -
r40781:45a0047c default
parent child Browse files
Show More
@@ -1,2480 +1,2497 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance'''
3 3
4 4 # "historical portability" policy of perf.py:
5 5 #
6 6 # We have to do:
7 7 # - make perf.py "loadable" with as wide Mercurial version as possible
8 8 # This doesn't mean that perf commands work correctly with that Mercurial.
9 9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
10 10 # - make historical perf command work correctly with as wide Mercurial
11 11 # version as possible
12 12 #
13 13 # We have to do, if possible with reasonable cost:
14 14 # - make recent perf command for historical feature work correctly
15 15 # with early Mercurial
16 16 #
17 17 # We don't have to do:
18 18 # - make perf command for recent feature work correctly with early
19 19 # Mercurial
20 20
21 21 from __future__ import absolute_import
22 22 import contextlib
23 23 import functools
24 24 import gc
25 25 import os
26 26 import random
27 27 import shutil
28 28 import struct
29 29 import sys
30 30 import tempfile
31 31 import threading
32 32 import time
33 33 from mercurial import (
34 34 changegroup,
35 35 cmdutil,
36 36 commands,
37 37 copies,
38 38 error,
39 39 extensions,
40 40 mdiff,
41 41 merge,
42 42 revlog,
43 43 util,
44 44 )
45 45
46 46 # for "historical portability":
47 47 # try to import modules separately (in dict order), and ignore
48 48 # failure, because these aren't available with early Mercurial
49 49 try:
50 50 from mercurial import branchmap # since 2.5 (or bcee63733aad)
51 51 except ImportError:
52 52 pass
53 53 try:
54 54 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
55 55 except ImportError:
56 56 pass
57 57 try:
58 58 from mercurial import registrar # since 3.7 (or 37d50250b696)
59 59 dir(registrar) # forcibly load it
60 60 except ImportError:
61 61 registrar = None
62 62 try:
63 63 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
64 64 except ImportError:
65 65 pass
66 66 try:
67 67 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
68 68 except ImportError:
69 69 pass
70 70
def identity(a):
    """Return *a* unchanged (fallback used when pycompat helpers are absent)."""
    return a
73 73
74 74 try:
75 75 from mercurial import pycompat
76 76 getargspec = pycompat.getargspec # added to module after 4.5
77 77 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
78 78 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
79 79 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
80 80 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
81 81 if pycompat.ispy3:
82 82 _maxint = sys.maxsize # per py3 docs for replacing maxint
83 83 else:
84 84 _maxint = sys.maxint
85 85 except (ImportError, AttributeError):
86 86 import inspect
87 87 getargspec = inspect.getargspec
88 88 _byteskwargs = identity
89 89 fsencode = identity # no py3 support
90 90 _maxint = sys.maxint # no py3 support
91 91 _sysstr = lambda x: x # no py3 support
92 92 _xrange = xrange
93 93
94 94 try:
95 95 # 4.7+
96 96 queue = pycompat.queue.Queue
97 97 except (AttributeError, ImportError):
98 98 # <4.7.
99 99 try:
100 100 queue = pycompat.queue
101 101 except (AttributeError, ImportError):
102 102 queue = util.queue
103 103
104 104 try:
105 105 from mercurial import logcmdutil
106 106 makelogtemplater = logcmdutil.maketemplater
107 107 except (AttributeError, ImportError):
108 108 try:
109 109 makelogtemplater = cmdutil.makelogtemplater
110 110 except (AttributeError, ImportError):
111 111 makelogtemplater = None
112 112
113 113 # for "historical portability":
114 114 # define util.safehasattr forcibly, because util.safehasattr has been
115 115 # available since 1.9.3 (or 94b200a11cf7)
# unique sentinel: lets us distinguish "attribute missing" from an attribute
# whose value is None/False
_undefined = object()
def safehasattr(thing, attr):
    """Return True if `thing` has attribute `attr` (attr may be bytes)."""
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
# install on util so both this file and older hg code paths can rely on it
setattr(util, 'safehasattr', safehasattr)
120 120
121 121 # for "historical portability":
122 122 # define util.timer forcibly, because util.timer has been available
123 123 # since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    # NOTE(review): os.name is a str, so this bytes comparison can never be
    # true on py3 -- harmless there because py3 always has perf_counter
    # (handled above); on py2 bytes == str so the check works as intended.
    util.timer = time.clock
else:
    util.timer = time.time

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
    (b'c', b'changelog', False, (b'open changelog')),
    (b'm', b'manifest', False, (b'open manifest')),
    (b'', b'dir', False, (b'open directory manifest')),
]))

# table filled in by the @command decorator below; hg reads this at load time
cmdtable = {}
152 152
153 153 # for "historical portability":
154 154 # define parsealiases locally, because cmdutil.parsealiases has been
155 155 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a command declaration b"name|alias1|alias2" into its names."""
    names = cmd.split(b"|")
    return names
158 158
# Pick the richest @command decorator this Mercurial version provides,
# newest API first.
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            # emulate norepo by appending to the legacy commands.norepo string
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
186 186
# Declare the perf.* config knobs when this hg provides
# registrar.configitem; older versions simply read the unregistered
# config values, hence the silent fallback.
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    pass
206 206
def getlen(ui):
    """Return a length function; under perf.stub, always report length 1."""
    stubbed = ui.configbool(b"perf", b"stub", False)
    if not stubbed:
        return len
    return lambda x: 1
211 211
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                # plain formatter: callers treat falsy formatters as "plain"
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)
    return functools.partial(_timer, fm, displayall=displayall), fm
277 277
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* exactly once instead of benchmarking it (perf.stub mode).

    *fm* and *title* are accepted only for signature compatibility with
    _timer(); no timing output is produced.
    """
    if setup is not None:
        setup()
    func()
282 282
@contextlib.contextmanager
def timeone():
    """Time the managed block.

    Yields a list that, after the block exits, holds one
    (wallclock, user-cpu, sys-cpu) tuple.
    """
    result = []
    times_before = os.times()
    wall_before = util.timer()
    yield result
    wall_after = util.timer()
    times_after = os.times()
    result.append((wall_after - wall_before,
                   times_after[0] - times_before[0],
                   times_after[1] - times_before[1]))
293 293
def _timer(fm, func, setup=None, title=None, displayall=False):
    # Repeatedly run func, collecting (wall, user, sys) samples, then hand
    # them to formatone() for display through formatter `fm`.
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    while True:
        if setup is not None:
            setup()
        with timeone() as item:
            # keep the last return value so it can be reported alongside
            # the timings
            r = func()
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # stop once we have >3s elapsed and at least 100 samples ...
        if cstop - begin > 3 and count >= 100:
            break
        # ... or >10s elapsed with at least 3 samples (slow funcs)
        if cstop - begin > 10 and count >= 3:
            break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)
314 314
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timing samples to formatter *fm*.

    *timings* is a list of (wall, user, sys) tuples; it is sorted in place.
    Only the best sample is reported unless *displayall* is set, in which
    case max, average and median are shown as well.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def show(role, sample):
        # the "best" line carries unprefixed field names; others are
        # namespaced (e.g. max.wall)
        prefix = b'' if role == b'best' else b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', sample[0])
        fm.write(prefix + b'comb', b' comb %f', sample[1] + sample[2])
        fm.write(prefix + b'user', b' user %f', sample[1])
        fm.write(prefix + b'sys', b' sys %f', sample[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    show(b'best', timings[0])
    if displayall:
        show(b'max', timings[-1])
        show(b'avg', tuple(sum(col) / count for col in zip(*timings)))
        show(b'median', timings[len(timings) // 2])
346 346
347 347 # utilities for historical portability
348 348
def getint(ui, section, name, default):
    """Read config value section.name as an integer.

    Portability helper: ui.configint has only existed since hg 1.9
    (fa2b596db182). Raises error.ConfigError for non-integer values.
    """
    raw = ui.config(section, name, None)
    if raw is not None:
        try:
            return int(raw)
        except ValueError:
            raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                    % (section, name, raw))
    return default
360 360
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    # snapshot now; restore() closes over this original value
    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        # tiny setter/restorer pair bound to (obj, name, origvalue)
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
390 390
391 391 # utilities to examine each internal API changes
392 392
def getbranchmapsubsettable():
    """Locate the 'subsettable' mapping wherever this hg version keeps it.

    It lives in branchmap since 2.9 (175c6fd8cacc) and previously in
    repoview since 2.5 (59a9f18d4587).
    """
    for candidate in (branchmap, repoview):
        table = getattr(candidate, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here: both modules
    # exist, but neither carries a subsettable attribute
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
408 408
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store.

    repo.svfs exists since 2.3 (7034365089bf); fall back to the historical
    repo.sopener before that.
    """
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    return getattr(repo, 'sopener')
419 419
def getvfs(repo):
    """Return appropriate object to access files under .hg.

    repo.vfs exists since 2.3 (7034365089bf); fall back to the historical
    repo.opener before that.
    """
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    return getattr(repo, 'opener')
430 430
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                # NOTE(review): on py3 instance __dict__ keys are str, so
                # this bytes-key membership test looks like it can never
                # match and the cache would silently not be cleared --
                # confirm behavior under py3
                del repo.__dict__[b'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
459 459
460 460 # utilities to clear cache
461 461
def clearfilecache(obj, attrname):
    """Invalidate the @filecache'd property *attrname* on *obj*.

    Operates on the unfiltered repo when *obj* is a repoview; missing
    attributes are ignored.
    """
    getunfiltered = getattr(obj, 'unfiltered', None)
    if getunfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
469 469
def clearchangelog(repo):
    """Forget every cached changelog structure on *repo*."""
    unfi = repo.unfiltered()
    if repo is not unfi:
        # a repoview caches its filtered changelog separately; reset it too
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(unfi, 'changelog')
475 475
476 476 # perf commands
477 477
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the dirstate with the given patterns"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})
    def walk():
        return len(list(repo.dirstate.walk(matcher, subrepos=[],
                                           unknown=True, ignored=False)))
    timer(walk)
    fm.end()
486 486
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file *f* at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]
    def annotate():
        return len(fctx.annotate(True))
    timer(annotate)
    fm.end()
494 494
@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    """benchmark computing repository status"""
    opts = _byteskwargs(opts)
    # an alternative would be timing repo.dirstate.status() directly with
    # an always-matcher, bypassing the localrepo layer
    timer, fm = gettimer(ui, opts)
    lookunknown = opts[b'unknown']
    def status():
        return sum(map(len, repo.status(unknown=lookunknown)))
    timer(status)
    fm.end()
506 506
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the whole working copy"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Read the original quiet level *before* entering the try block:
    # previously it was the first statement inside `try`, so any failure on
    # that line would make the `finally` clause raise NameError on
    # `oldquiet`, masking the real error.
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
520 520
def clearcaches(cl):
    """Clear a revlog's lookup caches, whichever internal API it exposes."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        # modern revlogs have an explicit cache-clearing API
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # older revlogs: reseed the node->rev cache by hand
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
529 529
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark head-revision computation on the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def run():
        len(cl.headrevs())
        clearcaches(cl)
    timer(run)
    fm.end()
540 540
@command(b'perftags', formatteropts+
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perftags(ui, repo, **opts):
    """benchmark computing the repository's tags"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()
    def compute():
        return len(repo.tags())
    timer(compute, setup=setup)
    fm.end()
559 559
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark walking all ancestors of the repository heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def walkancestors():
        for _rev in repo.changelog.ancestors(heads):
            pass
    timer(walkancestors)
    fm.end()
570 570
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests of REVSET against a lazy ancestor set"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def membership():
        ancestorset = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in ancestorset
    timer(membership)
    fm.end()
583 583
@command(b'perfbookmarks', formatteropts +
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')
    def load():
        repo._bookmarks
    timer(load, setup=setup)
    fm.end()
602 602
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # each make* helper returns a zero-argument callable suitable for timer()

    def makebench(fn):
        # reopen + reparse the bundle, then run fn over the unbundler
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # read the whole bundle through the unbundler in `size` chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # baseline: raw file reads without any bundle parsing
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # peek at the bundle once to decide which format-specific benches apply
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
720 720
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        state, chunks = bundler._generatechangelog(cl, nodes)
        # exhaust the chunk generator; producing the chunks is the work
        # being measured
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
751 751
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark computing the dirstate's directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # force the dirstate to be loaded before timing
    def run():
        dirstate.hasdir(b'a')
        del dirstate._map._dirs
    timer(run)
    fm.end()
763 763
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark parsing the dirstate from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # warm the dirstate once before timing
    def reload():
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(reload)
    fm.end()
774 774
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark rebuilding the dirstate directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # make sure the dirstate is loaded
    def rebuild():
        repo.dirstate.hasdir(b"a")
        del repo.dirstate._map._dirs
    timer(rebuild)
    fm.end()
785 785
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark building the dirstate file fold map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # make sure the dirstate is loaded
    def rebuild():
        dirstate._map.filefoldmap.get(b'a')
        del dirstate._map.filefoldmap
    timer(rebuild)
    fm.end()
797 797
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark building the dirstate directory fold map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # make sure the dirstate is loaded
    def rebuild():
        dirstate._map.dirfoldmap.get(b'a')
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(rebuild)
    fm.end()
810 810
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark writing the dirstate back to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b"a" in dirstate  # make sure the dirstate is loaded
    def flush():
        dirstate._dirty = True
        dirstate.write(repo.currenttransaction())
    timer(flush)
    fm.end()
822 822
@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    """benchmark the merge-action calculation against REV"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so
    # prime that cache
    wctx.dirty()
    def calculate():
        # acceptremote=True keeps prompts out of the middle of the benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(calculate)
    fm.end()
841 841
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def trace():
        copies.pathcopies(ctx1, ctx2)
    timer(trace)
    fm.end()
853 853
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
          ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def compute():
        phases = _phases
        if full:
            # with --full, also time re-reading the phase data from disk
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(compute)
    fm.end()
872 872
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = (path.branch, opts.get(b'branch') or [])
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    # fetch the remote phase roots over the wire protocol
    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    # count how many advertised roots are known locally and non-public
    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
930 930
@command(b'perfmanifest',[
    (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
    (b'', b'clear-disk', False, b'clear on-disk caches too'),
] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # argument is a changeset revision: derive the manifest node from it
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        # argument addresses the manifest revlog directly, either as a full
        # hex node or as an integer revision number
        from mercurial.node import bin

        if len(rev) == 40:
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    # older Mercurial: storage only reachable via _revlog
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        # every run starts from cold (optionally also on-disk) caches
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
966 966
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading the changelog entry of a single revision"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()
    def d():
        repo.changelog.read(n)
        #repo.changelog._cache = None
    timer(d)
    fm.end()
977 977
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # drop all cached state so each run rebuilds the ignore matcher
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        # attribute access triggers (re)computation of the ignore matcher
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
994
@command(b'perfindex', formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark constructing a changelog and resolving the tip node"""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = repo[b"tip"].node()
    svfs = getsvfs(repo)
    def d():
        # build a fresh revlog each run so index parsing is included
        cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
        cl.rev(n)
    timer(d)
    fm.end()
991 1008
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup cost of spawning `hg version`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        if os.name != r'nt':
            # blank HGRCPATH avoids measuring user-config parsing
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
        else:
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
    timer(d)
    fm.end()
1005 1022
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark resolving the parents of the first N changelog nodes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    # resolve the nodes up front so only parents() lookups are timed
    nl = [repo.changelog.node(i) for i in _xrange(count)]
    def d():
        for n in nl:
            repo.changelog.parents(n)
    timer(d)
    fm.end()
1022 1039
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the files touched by a changeset"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    def d():
        len(repo[x].files())
    timer(d)
    fm.end()
1032 1049
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the files list straight from the changelog"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        # index [3] of a parsed changelog entry is the list of files
        len(cl.read(x)[3])
    timer(d)
    fm.end()
1043 1060
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()
1050 1067
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
         ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark applying a series of random edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: every run replays the exact same edit sequence
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    # pre-generate the random hunks so only replacelines() is timed
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1084 1101
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()
1092 1109
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark resolving a node to a revision with cold caches"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(n)
        # wipe the caches so the next run resolves from scratch
        clearcaches(cl)
    timer(d)
    fm.end()
1106 1123
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running `hg log`, optionally with rename following"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev=[]
    timer, fm = gettimer(ui, opts)
    # buffer the command output; we only care about the run time
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
                               copies=opts.get(b'rename')))
    ui.popbuffer()
    fm.end()
1120 1137
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        # iterate from tip down to (but not including) the null revision
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            ctx.branch() # read changelog data (in addition to the index)
    timer(moonwalk)
    fm.end()
1135 1152
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render into a throw-away ui: we measure rendering, not output
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1169 1186
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
         ])
def perfhelperpathcopies(ui, repo, revs=None, **opts):
    """find statistics about potential parameters for `perftracecopies`

    This command finds source-destination pairs relevant for copytracing
    testing. It reports values for some of the parameters that impact copy
    tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    # NOTE: ``revs`` previously defaulted to a mutable ``[]``; ``None`` is
    # behaviorally identical (both are falsy and rebound below) and avoids
    # the shared-mutable-default pitfall.
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    # with --timing, two extra columns are reported: the number of renames
    # found and how long pathcopies() took
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # only merge commits yield interesting (base, parent) pairs
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
1245 1262
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
    fm.end()
1252 1269
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    def d():
        s.fncache._load()
    timer(d)
    fm.end()
1262 1279
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache file"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    s.fncache._load()
    # the original fncache is backed up and the transaction closed at the
    # end, so the repository is left untouched
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')
    def d():
        # force the dirty flag so write() actually rewrites the file
        s.fncache._dirty = True
        s.fncache.write(tr)
    timer(d)
    tr.close()
    lock.release()
    fm.end()
1279 1296
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path listed in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    s.fncache._load()
    def d():
        for p in s.fncache.entries:
            s.encode(p)
    timer(d)
    fm.end()
1291 1308
def _bdiffworker(q, blocks, xdiff, ready, done):
    """worker loop for the threaded variant of perfbdiff

    Pulls text pairs from ``q`` and diffs each one until a ``None``
    sentinel is received, then parks on the ``ready`` condition waiting
    for the next timed run. The loop exits once ``done`` is set (the
    main thread wakes the workers one last time so they can observe it).
    """
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            # select the same diff flavor the unthreaded path uses
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()
1307 1324
def _manifestrevision(repo, mnode):
    """Return the raw stored text of manifest node ``mnode``."""
    manifestlog = repo.manifestlog

    # Recent Mercurial exposes per-tree storage through getstorage();
    # older versions only provide the private _revlog attribute.
    if util.safehasattr(manifestlog, b'getstorage'):
        storage = manifestlog.getstorage(b'')
    else:
        storage = manifestlog._revlog

    return storage.revision(mnode)
1317 1334
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the first positional argument is actually the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # gather all text pairs up front so only the diffing itself is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        # start the workers and let them drain the priming sentinels so
        # they are all parked on `ready` before the first timed run
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # wake the workers one final time so they observe `done` and exit
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1418 1435
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the first positional argument is actually the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # gather all text pairs up front so only the diffing itself is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1484 1501
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    # time `hg diff` once per whitespace-option combination
    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = dict((options[c], b'1') for c in diffopt)
        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()
        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()
1506 1523
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener') # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # the first 4 bytes hold flags (high 16 bits) and version (low 16 bits)
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    # sample nodes at several depths; lookup cost depends on position
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1624 1641
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in _xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1666 1683
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          # fixed help text: it was a copy-paste of --stoprev's description
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
         ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative boundaries count back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # message typo fixed ("invalide" -> "invalid")
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase)
        allresults.append(timing)

    ### consolidate the results in a single list
    # each entry becomes (rev, [timing-from-run-1, timing-from-run-2, ...])
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # bug fix: the median row used to be computed with `70 // 100`
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
1768 1785
1769 1786 class _faketr(object):
1770 1787 def add(s, x, y, z=None):
1771 1788 return None
1772 1789
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True):
    """one full pass of perfrevlogwrite: replay revisions
    [startrev, stoprev] of ``orig`` into a temporary revlog

    Returns a list of (rev, timing) entries, one per added revision.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        for idx, rev in enumerate(revs):
            ui.progress(topic, idx, unit='revs', total=total)
            # build the input outside the timed region: only the
            # addrawrevision() call itself is measured
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        ui.progress(topic, total, unit='revs', total=total)
        ui.progress(topic, None, unit='revs', total=total)
    return timings
1793 1810
def _getrevisionseed(orig, rev, tr, source):
    """Return (args, kwargs) suitable for addrawrevision() to recreate
    revision ``rev`` of revlog ``orig``.

    ``source`` picks how the data is supplied:

    - b'full':            pass the fulltext, no cached delta
    - b'parent-1':        delta against the first parent
    - b'parent-2':        delta against the second parent (or p1 if none)
    - b'parent-smallest': delta against whichever parent yields the
                          smaller delta
    - b'storage':         reuse the delta parent recorded in the revlog
    """
    from mercurial.node import nullid

    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    text = None
    cachedelta = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second one
        parent = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(parent), orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        parent, diff = p1, orig.revdiff(p1, rev)
        if p2 != nullid:
            otherdiff = orig.revdiff(p2, rev)
            if len(otherdiff) < len(diff):
                parent, diff = p2, otherdiff
        cachedelta = (orig.rev(parent), diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, orig.linkrev(rev), p1, p2),
            {'node': node, 'flags': orig.flags(rev), 'cachedelta': cachedelta})
1832 1849
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Context manager yielding a writable copy of revlog ``orig`` truncated
    just before ``truncaterev``.

    The index and data files are copied into a temporary directory, cut back
    so that revisions >= truncaterev can be re-added, and a fresh revlog is
    instantiated on top of them. The temporary directory is removed on exit.
    Inline revlogs are rejected (truncation offsets assume split i/d files).
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        # index entries are fixed-size records, so the cut point is
        # truncaterev * entry-size; the data file is cut at that revision's
        # start offset
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
1879 1896
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument: either validate the user-provided list, or
    # default to every engine that can actually compress revlog data.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # open a raw file handle on the file that actually holds the chunks
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    # each do*() variant below measures one read strategy; caches are
    # cleared first so every run pays the full cost
    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        # (docompress below reuses them as compression input)
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1997 2014
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # with -c/-m the positional argument is actually the revision
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # slice each on-disk segment into per-revision buffers without
        # copying (util.buffer is a zero-copy view)
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    # one do*() helper per phase; with --cache the revlog caches are kept
    # warm between runs instead of being cleared
    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved into revlogutils.deltas; fall back for older versions
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute every intermediate product once so each benchmark times
    # only its own phase
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2133 2150
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if need to evaluate the impact of build volatile
    revisions set cache on the revset execution. Volatile cache hold filtered
    and obsolete related cache."""
    # NOTE: the help text previously said "--clean", but the option
    # registered above is -C/--clear; fixed to match.
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            repo.invalidatevolatilesets()
        # iterate fully so the revset is actually evaluated, not just built
        if contexts:
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
2156 2173
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
         ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # work on the unfiltered repo so filter computation itself is measured
    repo = repo.unfiltered()

    def getobs(name):
        # benchmark-function factory for one obsolescence set
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)
        return d

    # positional `names` arguments restrict which sets are measured
    allobs = sorted(obsolete.cachefuncs)
    if names:
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        # benchmark-function factory for one repoview filter
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)
        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()
2198 2215
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
         ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap so the subset build is timed too
                view._branchcaches.clear()
            else:
                # only drop this filter's cache; subsets stay warm
                view._branchcaches.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    # (i.e. order filters so each one's subset is processed before it)
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap reads/writes so only computation is measured;
    # restored in the finally block below
    branchcacheread = safeattrsetter(branchmap, b'read')
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcacheread.set(lambda repo: None)
    branchcachewrite.set(lambda bc, repo: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2267 2284
@command(b'perfbranchmapload', [
    (b'f', b'filter', b'', b'Specify repoview filter'),
    # fixed typo: "brachmap" -> "branchmap" in the user-visible help text
    (b'', b'list', False, b'List branchmap filter caches'),
    (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),

] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    # --list: just report which branchmap cache files exist and their sizes
    if list:
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap() # make sure we have a relevant, up to date branchmap

    currentfilter = filter
    # try once without timer, the filter may not be cached;
    # walk down the subset chain until a cached branchmap is found
    while branchmap.read(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        branchmap.read(repo)
    timer(bench, setup=setup)
    fm.end()
2312 2329
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)

    def loadmarkers():
        # instantiating obsstore parses all markers; report how many
        return len(obsolete.obsstore(svfs))

    timer(loadmarkers)
    fm.end()
2322 2339
@command(b'perflrucachedict', formatteropts +
         [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
          (b'', b'mincost', 0, b'smallest cost of items in cache'),
          (b'', b'maxcost', 100, b'maximum cost of items in cache'),
          (b'', b'size', 4, b'size of cache'),
          (b'', b'gets', 10000, b'number of key lookups'),
          (b'', b'sets', 10000, b'number of key sets'),
          (b'', b'mixed', 10000, b'number of mixed mode operations'),
          (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
         norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    # Benchmarks util.lrucachedict: construction, gets, inserts/sets and a
    # mixed workload, each with or without a total-cost limit depending on
    # --costlimit. (Intentionally no docstring: tests expect this command to
    # show "(no help text available)".)
    opts = _byteskwargs(opts)

    def doinit():
        # construction cost of an empty cache
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    # one random key per cache slot
    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        # `costs` is assigned below, before any benchmark runs; the closure
        # resolves it at call time.
        # NOTE(review): indexes costs[i] for i < size; assumes sets >= size
        # so an entry exists for every value -- confirm.
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        # op 0 = get, op 1 = set, chosen per --mixedgetfreq
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # with a cost limit only the cost-aware variants are meaningful
    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2453 2470
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def bench():
        # hoist the attribute lookup out of the hot loop
        write = ui.write
        for _ in range(100000):
            write(b'Testing write performance\n')

    timer(bench)
    fm.end()
2466 2483
def uisetup(ui):
    """Extension setup hook.

    On Mercurial versions whose cmdutil.openrevlog() predates the --dir
    option, wrap it so that passing --dir aborts with a clear message
    instead of failing obscurely.
    """
    if (util.safehasattr(cmdutil, b'openrevlog') and
        not util.safehasattr(commands, b'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(b"This version doesn't support --dir option",
                                  hint=b"use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
@@ -1,287 +1,289 b''
1 1 #require test-repo
2 2
3 3 Set vars:
4 4
5 5 $ . "$TESTDIR/helpers-testrepo.sh"
6 6 $ CONTRIBDIR="$TESTDIR/../contrib"
7 7
8 8 Prepare repo:
9 9
10 10 $ hg init
11 11
12 12 $ echo this is file a > a
13 13 $ hg add a
14 14 $ hg commit -m first
15 15
16 16 $ echo adding to file a >> a
17 17 $ hg commit -m second
18 18
19 19 $ echo adding more to file a >> a
20 20 $ hg commit -m third
21 21
22 22 $ hg up -r 0
23 23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 24 $ echo merge-this >> a
25 25 $ hg commit -m merge-able
26 26 created new head
27 27
28 28 $ hg up -r 2
29 29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30 30
31 31 perfstatus
32 32
33 33 $ cat >> $HGRCPATH << EOF
34 34 > [extensions]
35 35 > perfstatusext=$CONTRIBDIR/perf.py
36 36 > [perf]
37 37 > presleep=0
38 38 > stub=on
39 39 > parentscount=1
40 40 > EOF
41 41 $ hg help perfstatusext
42 42 perfstatusext extension - helper extension to measure performance
43 43
44 44 list of commands:
45 45
46 46 perfaddremove
47 47 (no help text available)
48 48 perfancestors
49 49 (no help text available)
50 50 perfancestorset
51 51 (no help text available)
52 52 perfannotate (no help text available)
53 53 perfbdiff benchmark a bdiff between revisions
54 54 perfbookmarks
55 55 benchmark parsing bookmarks from disk to memory
56 56 perfbranchmap
57 57 benchmark the update of a branchmap
58 58 perfbranchmapload
59 59 benchmark reading the branchmap
60 60 perfbundleread
61 61 Benchmark reading of bundle files.
62 62 perfcca (no help text available)
63 63 perfchangegroupchangelog
64 64 Benchmark producing a changelog group for a changegroup.
65 65 perfchangeset
66 66 (no help text available)
67 67 perfctxfiles (no help text available)
68 68 perfdiffwd Profile diff of working directory changes
69 69 perfdirfoldmap
70 70 (no help text available)
71 71 perfdirs (no help text available)
72 72 perfdirstate (no help text available)
73 73 perfdirstatedirs
74 74 (no help text available)
75 75 perfdirstatefoldmap
76 76 (no help text available)
77 77 perfdirstatewrite
78 78 (no help text available)
79 79 perffncacheencode
80 80 (no help text available)
81 81 perffncacheload
82 82 (no help text available)
83 83 perffncachewrite
84 84 (no help text available)
85 85 perfheads (no help text available)
86 86 perfhelper-pathcopies
87 87 find statistic about potential parameters for the
88 88 'perftracecopies'
89 perfignore benchmark operation related to computing ignore
89 90 perfindex (no help text available)
90 91 perflinelogedits
91 92 (no help text available)
92 93 perfloadmarkers
93 94 benchmark the time to parse the on-disk markers for a repo
94 95 perflog (no help text available)
95 96 perflookup (no help text available)
96 97 perflrucachedict
97 98 (no help text available)
98 99 perfmanifest benchmark the time to read a manifest from disk and return a
99 100 usable
100 101 perfmergecalculate
101 102 (no help text available)
102 103 perfmoonwalk benchmark walking the changelog backwards
103 104 perfnodelookup
104 105 (no help text available)
105 106 perfparents (no help text available)
106 107 perfpathcopies
107 108 benchmark the copy tracing logic
108 109 perfphases benchmark phasesets computation
109 110 perfphasesremote
110 111 benchmark time needed to analyse phases of the remote server
111 112 perfrawfiles (no help text available)
112 113 perfrevlogchunks
113 114 Benchmark operations on revlog chunks.
114 115 perfrevlogindex
115 116 Benchmark operations against a revlog index.
116 117 perfrevlogrevision
117 118 Benchmark obtaining a revlog revision.
118 119 perfrevlogrevisions
119 120 Benchmark reading a series of revisions from a revlog.
120 121 perfrevlogwrite
121 122 Benchmark writing a series of revisions to a revlog.
122 123 perfrevrange (no help text available)
123 124 perfrevset benchmark the execution time of a revset
124 125 perfstartup (no help text available)
125 126 perfstatus (no help text available)
126 127 perftags (no help text available)
127 128 perftemplating
128 129 test the rendering time of a given template
129 130 perfunidiff benchmark a unified diff between revisions
130 131 perfvolatilesets
131 132 benchmark the computation of various volatile set
132 133 perfwalk (no help text available)
133 134 perfwrite microbenchmark ui.write
134 135
135 136 (use 'hg help -v perfstatusext' to show built-in aliases and global options)
136 137 $ hg perfaddremove
137 138 $ hg perfancestors
138 139 $ hg perfancestorset 2
139 140 $ hg perfannotate a
140 141 $ hg perfbdiff -c 1
141 142 $ hg perfbdiff --alldata 1
142 143 $ hg perfunidiff -c 1
143 144 $ hg perfunidiff --alldata 1
144 145 $ hg perfbookmarks
145 146 $ hg perfbranchmap
146 147 $ hg perfbranchmapload
147 148 $ hg perfcca
148 149 $ hg perfchangegroupchangelog
149 150 $ hg perfchangegroupchangelog --cgversion 01
150 151 $ hg perfchangeset 2
151 152 $ hg perfctxfiles 2
152 153 $ hg perfdiffwd
153 154 $ hg perfdirfoldmap
154 155 $ hg perfdirs
155 156 $ hg perfdirstate
156 157 $ hg perfdirstatedirs
157 158 $ hg perfdirstatefoldmap
158 159 $ hg perfdirstatewrite
159 160 #if repofncache
160 161 $ hg perffncacheencode
161 162 $ hg perffncacheload
162 163 $ hg debugrebuildfncache
163 164 fncache already up to date
164 165 $ hg perffncachewrite
165 166 $ hg debugrebuildfncache
166 167 fncache already up to date
167 168 #endif
168 169 $ hg perfheads
170 $ hg perfignore
169 171 $ hg perfindex
170 172 $ hg perflinelogedits -n 1
171 173 $ hg perfloadmarkers
172 174 $ hg perflog
173 175 $ hg perflookup 2
174 176 $ hg perflrucache
175 177 $ hg perfmanifest 2
176 178 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
177 179 $ hg perfmanifest -m 44fe2c8352bb
178 180 abort: manifest revision must be integer or full node
179 181 [255]
180 182 $ hg perfmergecalculate -r 3
181 183 $ hg perfmoonwalk
182 184 $ hg perfnodelookup 2
183 185 $ hg perfpathcopies 1 2
184 186 $ hg perfrawfiles 2
185 187 $ hg perfrevlogindex -c
186 188 #if reporevlogstore
187 189 $ hg perfrevlogrevisions .hg/store/data/a.i
188 190 #endif
189 191 $ hg perfrevlogrevision -m 0
190 192 $ hg perfrevlogchunks -c
191 193 $ hg perfrevrange
192 194 $ hg perfrevset 'all()'
193 195 $ hg perfstartup
194 196 $ hg perfstatus
195 197 $ hg perftags
196 198 $ hg perftemplating
197 199 $ hg perfvolatilesets
198 200 $ hg perfwalk
199 201 $ hg perfparents
200 202
201 203 test actual output
202 204 ------------------
203 205
204 206 normal output:
205 207
206 208 $ hg perfheads --config perf.stub=no
207 209 ! wall * comb * user * sys * (best of *) (glob)
208 210
209 211 detailed output:
210 212
211 213 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
212 214 ! wall * comb * user * sys * (best of *) (glob)
213 215 ! wall * comb * user * sys * (max of *) (glob)
214 216 ! wall * comb * user * sys * (avg of *) (glob)
215 217 ! wall * comb * user * sys * (median of *) (glob)
216 218
217 219 test json output
218 220 ----------------
219 221
220 222 normal output:
221 223
222 224 $ hg perfheads --template json --config perf.stub=no
223 225 [
224 226 {
225 227 "comb": *, (glob)
226 228 "count": *, (glob)
227 229 "sys": *, (glob)
228 230 "user": *, (glob)
229 231 "wall": * (glob)
230 232 }
231 233 ]
232 234
233 235 detailed output:
234 236
235 237 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
236 238 [
237 239 {
238 240 "avg.comb": *, (glob)
239 241 "avg.count": *, (glob)
240 242 "avg.sys": *, (glob)
241 243 "avg.user": *, (glob)
242 244 "avg.wall": *, (glob)
243 245 "comb": *, (glob)
244 246 "count": *, (glob)
245 247 "max.comb": *, (glob)
246 248 "max.count": *, (glob)
247 249 "max.sys": *, (glob)
248 250 "max.user": *, (glob)
249 251 "max.wall": *, (glob)
250 252 "median.comb": *, (glob)
251 253 "median.count": *, (glob)
252 254 "median.sys": *, (glob)
253 255 "median.user": *, (glob)
254 256 "median.wall": *, (glob)
255 257 "sys": *, (glob)
256 258 "user": *, (glob)
257 259 "wall": * (glob)
258 260 }
259 261 ]
260 262
261 263 Check perf.py for historical portability
262 264 ----------------------------------------
263 265
264 266 $ cd "$TESTDIR/.."
265 267
266 268 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
267 269 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
268 270 > "$TESTDIR"/check-perf-code.py contrib/perf.py
269 271 contrib/perf.py:\d+: (re)
270 272 > from mercurial import (
271 273 import newer module separately in try clause for early Mercurial
272 274 contrib/perf.py:\d+: (re)
273 275 > from mercurial import (
274 276 import newer module separately in try clause for early Mercurial
275 277 contrib/perf.py:\d+: (re)
276 278 > origindexpath = orig.opener.join(orig.indexfile)
277 279 use getvfs()/getsvfs() for early Mercurial
278 280 contrib/perf.py:\d+: (re)
279 281 > origdatapath = orig.opener.join(orig.datafile)
280 282 use getvfs()/getsvfs() for early Mercurial
281 283 contrib/perf.py:\d+: (re)
282 284 > vfs = vfsmod.vfs(tmpdir)
283 285 use getvfs()/getsvfs() for early Mercurial
284 286 contrib/perf.py:\d+: (re)
285 287 > vfs.options = getattr(orig.opener, 'options', None)
286 288 use getvfs()/getsvfs() for early Mercurial
287 289 [1]
General Comments 0
You need to be logged in to leave comments. Login now