##// END OF EJS Templates
perf: support looking up multiple revisions...
Boris Feld -
r41484:7eb7637e default
parent child Browse files
Show More
@@ -1,2687 +1,2701
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance'''
3 3
4 4 # "historical portability" policy of perf.py:
5 5 #
6 6 # We have to do:
7 7 # - make perf.py "loadable" with as wide Mercurial version as possible
8 8 # This doesn't mean that perf commands work correctly with that Mercurial.
9 9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
10 10 # - make historical perf command work correctly with as wide Mercurial
11 11 # version as possible
12 12 #
13 13 # We have to do, if possible with reasonable cost:
14 14 # - make recent perf command for historical feature work correctly
15 15 # with early Mercurial
16 16 #
17 17 # We don't have to do:
18 18 # - make perf command for recent feature work correctly with early
19 19 # Mercurial
20 20
21 21 from __future__ import absolute_import
22 22 import contextlib
23 23 import functools
24 24 import gc
25 25 import os
26 26 import random
27 27 import shutil
28 28 import struct
29 29 import sys
30 30 import tempfile
31 31 import threading
32 32 import time
33 33 from mercurial import (
34 34 changegroup,
35 35 cmdutil,
36 36 commands,
37 37 copies,
38 38 error,
39 39 extensions,
40 40 hg,
41 41 mdiff,
42 42 merge,
43 43 revlog,
44 44 util,
45 45 )
46 46
47 47 # for "historical portability":
48 48 # try to import modules separately (in dict order), and ignore
49 49 # failure, because these aren't available with early Mercurial
50 50 try:
51 51 from mercurial import branchmap # since 2.5 (or bcee63733aad)
52 52 except ImportError:
53 53 pass
54 54 try:
55 55 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
56 56 except ImportError:
57 57 pass
58 58 try:
59 59 from mercurial import registrar # since 3.7 (or 37d50250b696)
60 60 dir(registrar) # forcibly load it
61 61 except ImportError:
62 62 registrar = None
63 63 try:
64 64 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
65 65 except ImportError:
66 66 pass
67 67 try:
68 68 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
69 69 except ImportError:
70 70 pass
71 71 try:
72 72 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
73 73 except ImportError:
74 74 pass
75 75
76 76
def identity(a):
    """Return *a* unchanged; fallback when pycompat helpers are missing."""
    return a
79 79
# for "historical portability":
# pull helpers out of pycompat when the running Mercurial is new enough;
# the except branch falls back to Python 2 builtins (no py3 support there,
# as the inline comments note).
try:
    from mercurial import pycompat
    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
    _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (ImportError, AttributeError):
    import inspect
    getargspec = inspect.getargspec
    _byteskwargs = identity
    fsencode = identity # no py3 support
    _maxint = sys.maxint # no py3 support
    _sysstr = lambda x: x # no py3 support
    _xrange = xrange

# the queue class moved around across releases: pycompat.queue.Queue in
# 4.7+, pycompat.queue before that, util.queue as the last resort
try:
    # 4.7+
    queue = pycompat.queue.Queue
except (AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (AttributeError, ImportError):
        queue = util.queue

# the log templater factory moved from cmdutil to logcmdutil; leave None
# when neither location has it so callers can detect the absence
try:
    from mercurial import logcmdutil
    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None
118 118
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # unique sentinel: distinguishes "attr missing" from None
def safehasattr(thing, attr):
    """Return True if *thing* has attribute *attr* (attr given as bytes)."""
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)
126 126
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == 'nt':
    # os.name is a native str ('nt'/'posix'), so it must be compared
    # against a native str: the previous b'nt' comparison was always
    # False on Python 3 and silently fell through to time.time.  On
    # Python 2, 'nt' and b'nt' are the same type, so behavior there is
    # unchanged.
    util.timer = time.clock
else:
    util.timer = time.time
136 136
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
# NOTE(review): the parenthesized help strings below look like former
# _() translation calls with the _ dropped -- confirm that is intended.
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
    (b'c', b'changelog', False, (b'open changelog')),
    (b'm', b'manifest', False, (b'open manifest')),
    (b'', b'dir', False, (b'open directory manifest')),
    ]))
156 156
cmdtable = {}

# for "historical portability":
# cmdutil.parsealiases has only been available since 1.5 (or
# 6252852b4332), so carry a local copy of the trivial implementation
def parsealiases(cmd):
    """Split a b"name|alias1|alias2" command spec into its name list."""
    return cmd.split(b"|")
164 164
# pick the @command decorator implementation appropriate for the running
# Mercurial: registrar (3.7+), cmdutil (1.9+, possibly wrapped to add the
# "norepo" flag), or a purely local fallback for anything older
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
192 192
# declare the config knobs this extension reads, when the running hg
# supports config registration (registrar.configitem); silently skip on
# versions predating it -- ui.config* still works without registration
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    pass
212 212
def getlen(ui):
    """Return the function used to measure result sizes.

    With the experimental perf.stub config set, every result counts as 1
    so test output stays stable; otherwise the builtin len() is used.
    """
    stubbed = ui.configbool(b"perf", b"stub", False)
    return (lambda x: 1) if stubbed else len
217 217
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                # always falsy: signals "plain output mode" to callers
                # that test the formatter's truth value
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)
    return functools.partial(_timer, fm, displayall=displayall), fm
283 283
def stub_timer(fm, func, setup=None, title=None):
    """Degenerate replacement for _timer(): run setup (when given) and
    the payload exactly once, with no timing loop and no reporting.

    Installed by gettimer() when the perf.stub config is set so tests
    get deterministic single-run behavior.
    """
    if setup is not None:
        setup()
    func()
288 288
@contextlib.contextmanager
def timeone():
    """Time the enclosed block once.

    Yields a list that, after the block exits, holds a single tuple of
    (wall clock delta, user cpu delta, system cpu delta).
    """
    measurements = []
    os_before = os.times()
    clock_before = util.timer()
    yield measurements
    clock_after = util.timer()
    os_after = os.times()
    measurements.append((clock_after - clock_before,
                         os_after[0] - os_before[0],
                         os_after[1] - os_before[1]))
299 299
def _timer(fm, func, setup=None, title=None, displayall=False):
    """Repeatedly run (setup; func) collecting one timing per run, then
    report the samples through formatone().

    Sampling stops once it has run for >3s with at least 100 runs, or
    (for slow payloads) for >10s with at least 3 runs.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    while True:
        if setup is not None:
            setup()
        with timeone() as item:
            # r keeps the last run's return value; it is reported by
            # formatone() as the "result" field
            r = func()
        count += 1
        results.append(item[0])
        cstop = util.timer()
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)
320 320
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's samples through formatter *fm*.

    Always reports the best (fastest wall-clock) sample; with
    *displayall* also reports max, average and median.  Each sample is a
    (wall, user, sys) tuple.  Note: sorts *timings* in place.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def _show(role, entry):
        # the "best" sample gets bare field names; other roles get a
        # "role." prefix so machine-readable output stays unambiguous
        prefix = b'' if role == b'best' else b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    _show(b'best', timings[0])
    if displayall:
        _show(b'max', timings[-1])
        _show(b'avg', tuple([sum(col) / count for col in zip(*timings)]))
        _show(b'median', timings[len(timings) // 2])
352 352
353 353 # utilities for historical portability
354 354
def getint(ui, section, name, default):
    """Read config section.name as an int, falling back to *default*.

    ui.configint has only been available since 1.9 (or fa2b596db182),
    so parse the raw string value locally for "historical portability".
    """
    value = ui.config(section, name, None)
    if value is None:
        return default
    try:
        return int(value)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, value))
366 366
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        # small set/restore handle closed over obj, name and the
        # original value captured above
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
396 396
397 397 # utilities to examine each internal API changes
398 398
def getbranchmapsubsettable():
    """Locate the branchmap 'subsettable' mapping across hg versions."""
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    for mod in (branchmap, repoview):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
414 414
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store

    repo.svfs has only been available since 2.3 (or 7034365089bf);
    fall back to the older repo.sopener on ancient versions.
    """
    svfs = getattr(repo, 'svfs', None)
    return svfs if svfs else getattr(repo, 'sopener')
425 425
def getvfs(repo):
    """Return appropriate object to access files under .hg

    repo.vfs has only been available since 2.3 (or 7034365089bf);
    fall back to the older repo.opener on ancient versions.
    """
    vfs = getattr(repo, 'vfs', None)
    return vfs if vfs else getattr(repo, 'opener')
436 436
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            # vars()/__dict__ are keyed by native str, so the key must be
            # a native str too: the previous b'_tagscache' lookup never
            # matched on Python 3, leaving the cache in place between runs
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
465 465
466 466 # utilities to clear cache
467 467
def clearfilecache(obj, attrname):
    """Forget a @filecache'd property so the next access recomputes it.

    Operates on the unfiltered repo when *obj* is a repoview, since the
    instance attribute and _filecache entry live there.
    """
    if getattr(obj, 'unfiltered', None) is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
475 475
def clearchangelog(repo):
    """Drop cached changelog state so the next access re-reads it."""
    # a filtered repoview keeps its own _clcache/_clcachekey pair; reset
    # those before clearing the unfiltered repo's filecache entry.
    # object.__setattr__ presumably bypasses the repoview's attribute
    # proxying -- NOTE(review): confirm against repoview.__setattr__
    if repo is not repo.unfiltered():
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
481 481
482 482 # perf commands
483 483
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the dirstate with a match on PATS"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()
492 492
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file F at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()
500 500
@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    """benchmark computing the repository status"""
    opts = _byteskwargs(opts)
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    # status() returns several file lists; sum of their lengths keeps the
    # result from being optimized away and gives a stable "result" field
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
512 512
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the working copy"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Save the quiet flag *before* entering the try block: with the save
    # inside it, anything raising earlier in the try body would make the
    # finally clause fail with a NameError instead of restoring the ui.
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
526 526
def clearcaches(cl):
    """Clear a revlog's lookup caches, across internal API changes."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # older revlogs: reset the node->rev cache to its initial state
        # (only the null node)
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
535 535
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def s():
        # start each run with cold revlog lookup caches
        clearcaches(cl)
    def d():
        len(cl.headrevs())
    timer(d, setup=s)
    fm.end()
548 548
@command(b'perftags', formatteropts+
         [
             (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perftags(ui, repo, **opts):
    """benchmark computing the repository's tags"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def s():
        # with --clear-revlogs, also time re-reading changelog/manifest
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()
    def t():
        return len(repo.tags())
    timer(t, setup=s)
    fm.end()
567 567
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating all ancestors of the changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        for a in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()
578 578
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark testing REVSET revisions against a lazy ancestor set"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        # membership tests only; the boolean results are intentionally
        # discarded -- we time the lazy ancestor set, not the bookkeeping
        for rev in revs:
            rev in s
    timer(d)
    fm.end()
591 591
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # normalize kwargs keys to bytes, consistent with every other command
    # in this file (this was the only command missing the conversion, so
    # opts reached gettimer()/hg.peer() with str keys on Python 3)
    opts = _byteskwargs(opts)
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # a fresh peer per run, so connection state isn't reused
        repos[1] = hg.peer(ui, opts, path)
    def d():
        setdiscovery.findcommonheads(ui, *repos)
    timer(d, setup=s)
    fm.end()
606 606
@command(b'perfbookmarks', formatteropts +
         [
             (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def s():
        if clearrevlogs:
            clearchangelog(repo)
        # NOTE(review): clearfilecache() checks this key against vars(),
        # which is keyed by native str -- a bytes key will not match on
        # Python 3; confirm whether b'_bookmarks' should be '_bookmarks'
        clearfilecache(repo, b'_bookmarks')
    def d():
        # attribute access re-reads bookmarks after s() dropped the cache
        repo._bookmarks
    timer(d, setup=s)
    fm.end()
625 625
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        # time fn() over a freshly opened and parsed bundle
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # time draining the bundle payload in *size*-byte reads
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # baseline: raw file reads without any bundle parsing at all
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        # time draining every bundle2 part in *size*-byte reads
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # sniff the bundle type once so only applicable benchmarks register
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
743 743
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # drain the chunk generator; only the generation cost is of interest
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
774 774
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark building the dirstate's directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # force the dirstate to be loaded before timing
    def d():
        dirstate.hasdir(b'a')
        # drop the lazily-built directory map so the next run rebuilds it
        del dirstate._map._dirs
    timer(d)
    fm.end()
786 786
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark reloading the dirstate from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # prime once outside the timed loop
    def d():
        # invalidate() drops the in-memory state; the membership test
        # forces a full re-read
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(d)
    fm.end()
797 797
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark dirstate.hasdir with a cold directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # force the dirstate to be loaded before timing
    def d():
        repo.dirstate.hasdir(b"a")
        # drop the directory map so each run rebuilds it
        del repo.dirstate._map._dirs
    timer(d)
    fm.end()
808 808
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark building the dirstate file fold map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # force the dirstate to be loaded before timing
    def d():
        dirstate._map.filefoldmap.get(b'a')
        # drop the fold map so each run rebuilds it
        del dirstate._map.filefoldmap
    timer(d)
    fm.end()
820 820
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark building the dirstate directory fold map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # force the dirstate to be loaded before timing
    def d():
        dirstate._map.dirfoldmap.get(b'a')
        # drop both the fold map and the underlying directory map so each
        # run rebuilds them from scratch
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(d)
    fm.end()
833 833
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark writing the dirstate back to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds  # force the dirstate to be loaded before timing
    def d():
        # mark dirty so write() actually serializes instead of no-op'ing
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()
845 845
@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    """benchmark calculating the merge actions between wdir and REV"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()
864 864
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def d():
        # time a full copy-tracing pass between the two contexts
        copies.pathcopies(ctx1, ctx2)
    timer(d)
    fm.end()
876 876
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
         ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def d():
        phases = _phases
        if full:
            # with --full, drop the cached object so re-reading the
            # phaseroots file is included in the measurement
            # NOTE(review): clearfilecache() matches this key against
            # vars(), keyed by native str -- a bytes key will not match
            # on Python 3; confirm the intended key type
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()
895 895
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    # use items() rather than the Python-2-only iteritems() so this
    # command also runs on Python 3 (same semantics on both)
    for nhex, phase in remotephases.items():
        if nhex == b'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
951 951
@command(b'perfmanifest',[
            (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
            (b'', b'clear-disk', False, b'clear on-disk caches too'),
         ] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset: benchmark against its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hex hash of a manifest node
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # getstorage() is the modern API; fall back to the private
                # _revlog attribute for older Mercurial versions
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        # drop in-memory (and optionally on-disk) caches so each read is cold
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
987 987
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading the changelog entry of a single revision"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()
    timer(lambda: repo.changelog.read(node))
    fm.end()
998 998
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def preparerun():
        # drop any cached ignore matcher so every run rebuilds it
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def loadignore():
        dirstate._ignore

    timer(loadignore, setup=preparerun, title=b"load")
    fm.end()
1015 1015
@command(b'perfindex', [
            (b'', b'rev', [], b'revision to be looked up (default tip)'),
            (b'', b'no-lookup', None, b'do not revision lookup post creation'),
         ] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node."""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    if opts[b'no_lookup']:
        if opts['rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        # default: a single lookup of tip
        nodes = [repo[b"tip"].node()]
    else:
        # --rev may be given multiple times; resolve all of them,
        # preserving revset order
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)
    timer(d, setup=setup)
    fm.end()
1054 1068
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the cost of a bare `hg version` subprocess invocation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        if os.name == r'nt':
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
        else:
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
    timer(d)
    fm.end()
1068 1082
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark resolving the parents of many changelog nodes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodes = [repo.changelog.node(i) for i in _xrange(count)]
    def d():
        for node in nodes:
            repo.changelog.parents(node)
    timer(d)
    fm.end()
1085 1099
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of a changeset"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo[rev].files()))
    fm.end()
1095 1109
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw file list straight from the changelog"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    # index 3 of a raw changelog entry is the list of touched files
    timer(lambda: len(cl.read(rev)[3]))
    fm.end()
1106 1120
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def resolve():
        return len(repo.lookup(rev))
    timer(resolve)
    fm.end()
1113 1127
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
          ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark applying a long, reproducible sequence of linelog edits"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed so every invocation replays the identical edit sequence
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        # random hunk: replace lines [a1, a2) with new lines [b1, b2)
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        # track the running line count so later hunks stay in range
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1147 1161
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    resolve = scmutil.revrange
    def d():
        return len(resolve(repo, specs))
    timer(d)
    fm.end()
1155 1169
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark node lookup against a freshly-cleared changelog index"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    node = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(node)
        # clear between lookups so each one starts cold
        clearcaches(cl)
    timer(d)
    fm.end()
1169 1183
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running `hg log`, optionally following renames"""
    opts = _byteskwargs(opts)
    rev = [] if rev is None else rev
    timer, fm = gettimer(ui, opts)
    # swallow the log output so printing cost is buffered, not terminal I/O
    ui.pushbuffer()
    def runlog():
        commands.log(ui, repo, rev=rev, date=b'', user=b'',
                     copies=opts.get(b'rename'))
    timer(runlog)
    ui.popbuffer()
    fm.end()
1183 1197
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[rev]
            # reading the branch forces loading changelog data, not just index
            ctx.branch()
    timer(moonwalk)
    fm.end()
1198 1212
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
          ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    # makelogtemplater only exists on Mercurial 4.3 and later
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render into a throw-away ui writing to /dev/null, so we time the
    # template formatting itself rather than terminal output
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1232 1246
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
         ])
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    # with --timing two extra columns (nb-renames, time) are emitted
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # merge revisions are the interesting copy-tracing cases
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        # examine every (ancestor-base, parent) pair of the merge
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                # plain output shows short hashes; structured data keeps hex
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
1308 1322
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def build():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)
    timer(build)
    fm.end()
1315 1329
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    timer(lambda: store.fncache._load())
    fm.end()
1325 1339
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache back to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    # writing needs the store lock and an open transaction
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')
    def d():
        # mark dirty so write() actually rewrites the file every run
        s.fncache._dirty = True
        s.fncache.write(tr)
    timer(d)
    tr.close()
    lock.release()
    fm.end()
1342 1356
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()
    def encodeall():
        for path in store.fncache.entries:
            store.encode(path)
    timer(encodeall)
    fm.end()
1354 1368
def _bdiffworker(q, blocks, xdiff, ready, done):
    """worker loop used by threaded perfbdiff runs

    Pulls text pairs from queue ``q`` and diffs them until a ``None``
    sentinel is seen, then parks on the ``ready`` condition until the
    coordinator wakes all workers for the next batch (or sets ``done``
    to terminate them).
    """
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            # pick the diff flavour requested on the command line
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()
1370 1384
def _manifestrevision(repo, mnode):
    """return the raw stored text of manifest node ``mnode``"""
    ml = repo.manifestlog

    if util.safehasattr(ml, b'getstorage'):
        # modern storage API
        storage = ml.getstorage(b'')
    else:
        # pre-getstorage Mercurial
        storage = ml._revlog

    return storage.revision(mnode)
1380 1394
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        # --alldata implies walking from the changelog
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # positional FILE is actually the revision in these modes
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        # spin up a pool of _bdiffworker threads; the queue is pre-seeded
        # with one None sentinel per worker so they park before timing starts
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            # one sentinel per worker ends the batch
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # wake the workers one last time so they observe `done` and exit
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1481 1495
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        # --alldata implies walking from the changelog
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # positional FILE is actually the revision in these modes
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1547 1561
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # single-letter flags mapped to the diff option each enables
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        # NOTE: deliberately rebinds `opts` to the diff options for this
        # flag combination; the original command opts are no longer needed
        opts = dict((options[c], b'1') for c in diffopt)
        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()
        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()
1569 1583
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # the low 16 bits of the 4-byte big-endian header hold the format version
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    # representative nodes at several positions along the index
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    # (callable, title) pairs; each is timed independently below
    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1687 1701
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    if startrev < 0:
        # negative start counts back from the end of the revlog
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        first = startrev
        last = rllen
        step = opts[b'dist']

        if reverse:
            first, last = last - 1, first - 1
            step = -1 * step

        for x in _xrange(first, last, step):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1729 1743
1730 1744 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1731 1745 [(b's', b'startrev', 1000, b'revision to start writing at'),
1732 1746 (b'', b'stoprev', -1, b'last revision to write'),
1733 1747 (b'', b'count', 3, b'last revision to write'),
1734 1748 (b'', b'details', False, b'print timing for every revisions tested'),
1735 1749 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
1736 1750 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1737 1751 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1738 1752 ],
1739 1753 b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    # NOTE: after _byteskwargs all opts keys are bytes; use b'...' keys
    # consistently (str keys would KeyError on Python 3).
    lazydeltabase = opts[b'lazydeltabase']
    source = opts[b'source']
    clearcaches = opts[b'clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort(b'invalid source type: %s' % source)

    ### actually gather results
    count = opts[b'count']
    if count <= 0:
        raise error.Abort(b'invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    # results[i] is (rev, [timing-from-run-0, timing-from-run-1, ...])
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts[b'details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # was `resultcount * 70 // 100`, which displayed the 70th
        # percentile under the "50%" label
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
1837 1851
1838 1852 class _faketr(object):
1839 1853 def add(s, x, y, z=None):
1840 1854 return None
1841 1855
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    """Re-add revisions [startrev, stoprev] of revlog `orig` into a
    throwaway truncated copy, timing each addrawrevision() call.

    Returns a list of (rev, timing) pairs, where each timing is the value
    captured by the `timeone` context manager for that revision.
    `runidx`, when given, is only used to label the progress topic.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            def updateprogress(pos):
                progress.update(pos)
            def completeprogress():
                progress.complete()
        else:
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            # build args *outside* the timed section so only the actual
            # revlog write is measured
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
1878 1892
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) pair needed to re-add revision `rev`.

    `source` selects how the content is supplied: as a full text
    (`full`), as a cached delta against a chosen base (`parent-1`,
    `parent-2`, `parent-smallest`) or against the base recorded in
    storage (`storage`).  Returns positional and keyword arguments
    suitable for revlog.addrawrevision().
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to p1 when there is no second parent
        base = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(base), orig.revdiff(base, rev))
    elif source == b'parent-smallest':
        # p1 first so ties keep the first-parent delta
        candidates = [(orig.revdiff(p1, rev), p1)]
        if p2 != nullid:
            candidates.append((orig.revdiff(p2, rev), p2))
        diff, base = min(candidates, key=lambda c: len(c[0]))
        cachedelta = (orig.rev(base), diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1917 1931
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a writable copy of revlog `orig` truncated to exclude
    revisions >= `truncaterev`.

    The copy lives in a temporary directory that is removed on exit.
    Inline revlogs are rejected: the index and data must be separate
    files so each can be truncated independently.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # index entries are fixed-size (orig._io.size bytes each)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            # start() gives the byte offset of truncaterev's data
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
1964 1978
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                # NOTE(review): `util.compressionengines` here vs
                # `util.compengines` below -- confirm both attributes exist
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # no explicit list: probe every available engine that can
        # actually compress revlog data
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # open a raw file handle on the file that holds the chunk data
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        # relies on chunks[0] having been filled by dochunkbatch above
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2082 2096
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # with -c/-m the first positional argument is actually the revision
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # hoist attribute lookups out of the hot loop
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # skip index entries interleaved with the data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        # NOTE(review): `chain` parameter is unused; the closure reads
        # `slicedchain` from the enclosing scope instead
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved between modules over time; fall back gracefully
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute the inputs each phase needs, outside the timed sections
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2218 2232
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option to evaluate the impact of building the volatile
    revision set caches on the revset execution. Volatile caches hold the
    filtering and obsolescence related data."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            # also pay the cost of building a changectx per revision
            for ctx in repo.set(expr): pass
        else:
            for r in repo.revs(expr): pass
    timer(d)
    fm.end()
2241 2255
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence.
    With positional `names`, only the named sets/filters are benchmarked.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def getobs(name):
        # build a benchmark closure for one obsolescence set
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)
        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        # build a benchmark closure for one repoview filter
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)
        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()
2283 2297
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
          ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap: measures a full rebuild
                view._branchcaches.clear()
            else:
                # drop only this filter's cache: measures an update on
                # top of its subset's cache
                view._branchcaches.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    # (topological order: a filter is only benchmarked after the subset
    # it builds upon, so the warmed caches are usable)
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap reading and writing so only the in-memory
    # computation is measured
    branchcacheread = safeattrsetter(branchmap, b'read')
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcacheread.set(lambda repo: None)
    branchcachewrite.set(lambda bc, repo: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2352 2366
@command(b'perfbranchmapupdate', [
    (b'', b'base', [], b'subset of revision to start from'),
    (b'', b'target', [], b'subset of revision to end with'),
    (b'', b'clear-caches', False, b'clear cache between each runs')
] + formatteropts)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None] # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the revisions the benchmark will actually add to the branchmap
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # register two temporary repoview filters exposing exactly the
        # base and target revision sets
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset was found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # each run starts from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2456 2470
@command(b'perfbranchmapload', [
    (b'f', b'filter', b'', b'Specify repoview filter'),
    (b'', b'list', False, b'List brachmap filter caches'),
    (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),

    ] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # only list the on-disk branchmap cache files and their sizes
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap() # make sure we have a relevant, up to date branchmap

    currentfilter = filter
    # try once without timer, the filter may not be cached
    # (walk up the subset chain until some filter has an on-disk cache)
    while branchmap.read(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        branchmap.read(repo)
    timer(bench, setup=setup)
    fm.end()
2501 2515
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    svfs = getsvfs(repo)

    def loadmarkers():
        return len(obsolete.obsstore(svfs))

    timer, fm = gettimer(ui)
    timer(loadmarkers)
    fm.end()
2511 2525
@command(b'perflrucachedict', formatteropts +
         [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
          (b'', b'mincost', 0, b'smallest cost of items in cache'),
          (b'', b'maxcost', 100, b'maximum cost of items in cache'),
          (b'', b'size', 4, b'size of cache'),
          (b'', b'gets', 10000, b'number of key lookups'),
          (b'', b'sets', 10000, b'number of key sets'),
          (b'', b'mixed', 10000, b'number of mixed mode operations'),
          (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
         norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    """Benchmark util.lrucachedict: init, gets, inserts/sets and a mixed
    workload, optionally with a total cost limit (--costlimit)."""
    opts = _byteskwargs(opts)

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        # NOTE: reads `costs`, defined further down; safe because this
        # closure is only called after `costs` has been built
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # the costed and un-costed variants are mutually exclusive benchmarks
    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2642 2656
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def bench():
        remaining = 100000
        while remaining > 0:
            ui.write((b'Testing write performance\n'))
            remaining -= 1

    timer(bench)
    fm.end()
2655 2669
def uisetup(ui):
    """Extension setup hook.

    On Mercurial versions whose cmdutil.openrevlog() predates the '--dir'
    option, wrap it so that passing '--dir' aborts with a clear message
    instead of failing obscurely.
    """
    if (util.safehasattr(cmdutil, b'openrevlog') and
        not util.safehasattr(commands, b'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(b"This version doesn't support --dir option",
                                  hint=b"use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2670 2684
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def bench():
        # drive one full progress bar from 0 to `total`
        progress = ui.makeprogress(topic, total=total)
        with progress:
            for _ in pycompat.xrange(total):
                progress.increment()

    timer(bench)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now