##// END OF EJS Templates
perf: move cache clearing in the `setup` step of `perfheads`...
Boris Feld -
r41481:ab6d1f82 default
parent child Browse files
Show More
@@ -1,2674 +1,2675 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance'''
3 3
4 4 # "historical portability" policy of perf.py:
5 5 #
6 6 # We have to do:
7 7 # - make perf.py "loadable" with as wide Mercurial version as possible
8 8 # This doesn't mean that perf commands work correctly with that Mercurial.
9 9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
10 10 # - make historical perf command work correctly with as wide Mercurial
11 11 # version as possible
12 12 #
13 13 # We have to do, if possible with reasonable cost:
14 14 # - make recent perf command for historical feature work correctly
15 15 # with early Mercurial
16 16 #
17 17 # We don't have to do:
18 18 # - make perf command for recent feature work correctly with early
19 19 # Mercurial
20 20
21 21 from __future__ import absolute_import
22 22 import contextlib
23 23 import functools
24 24 import gc
25 25 import os
26 26 import random
27 27 import shutil
28 28 import struct
29 29 import sys
30 30 import tempfile
31 31 import threading
32 32 import time
33 33 from mercurial import (
34 34 changegroup,
35 35 cmdutil,
36 36 commands,
37 37 copies,
38 38 error,
39 39 extensions,
40 40 hg,
41 41 mdiff,
42 42 merge,
43 43 revlog,
44 44 util,
45 45 )
46 46
47 47 # for "historical portability":
48 48 # try to import modules separately (in dict order), and ignore
49 49 # failure, because these aren't available with early Mercurial
50 50 try:
51 51 from mercurial import branchmap # since 2.5 (or bcee63733aad)
52 52 except ImportError:
53 53 pass
54 54 try:
55 55 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
56 56 except ImportError:
57 57 pass
58 58 try:
59 59 from mercurial import registrar # since 3.7 (or 37d50250b696)
60 60 dir(registrar) # forcibly load it
61 61 except ImportError:
62 62 registrar = None
63 63 try:
64 64 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
65 65 except ImportError:
66 66 pass
67 67 try:
68 68 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
69 69 except ImportError:
70 70 pass
71 71 try:
72 72 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
73 73 except ImportError:
74 74 pass
75 75
76 76
def identity(a):
    """Return *a* unchanged (stand-in for missing pycompat helpers)."""
    return a
79 79
80 80 try:
81 81 from mercurial import pycompat
82 82 getargspec = pycompat.getargspec # added to module after 4.5
83 83 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
84 84 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
85 85 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
86 86 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
87 87 if pycompat.ispy3:
88 88 _maxint = sys.maxsize # per py3 docs for replacing maxint
89 89 else:
90 90 _maxint = sys.maxint
91 91 except (ImportError, AttributeError):
92 92 import inspect
93 93 getargspec = inspect.getargspec
94 94 _byteskwargs = identity
95 95 fsencode = identity # no py3 support
96 96 _maxint = sys.maxint # no py3 support
97 97 _sysstr = lambda x: x # no py3 support
98 98 _xrange = xrange
99 99
100 100 try:
101 101 # 4.7+
102 102 queue = pycompat.queue.Queue
103 103 except (AttributeError, ImportError):
104 104 # <4.7.
105 105 try:
106 106 queue = pycompat.queue
107 107 except (AttributeError, ImportError):
108 108 queue = util.queue
109 109
110 110 try:
111 111 from mercurial import logcmdutil
112 112 makelogtemplater = logcmdutil.maketemplater
113 113 except (AttributeError, ImportError):
114 114 try:
115 115 makelogtemplater = cmdutil.makelogtemplater
116 116 except (AttributeError, ImportError):
117 117 makelogtemplater = None
118 118
119 119 # for "historical portability":
120 120 # define util.safehasattr forcibly, because util.safehasattr has been
121 121 # available since 1.9.3 (or 94b200a11cf7)
122 122 _undefined = object()
123 123 def safehasattr(thing, attr):
124 124 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
125 125 setattr(util, 'safehasattr', safehasattr)
126 126
127 127 # for "historical portability":
128 128 # define util.timer forcibly, because util.timer has been available
129 129 # since ae5d60bb70c9
130 130 if safehasattr(time, 'perf_counter'):
131 131 util.timer = time.perf_counter
132 132 elif os.name == b'nt':
133 133 util.timer = time.clock
134 134 else:
135 135 util.timer = time.time
136 136
137 137 # for "historical portability":
138 138 # use locally defined empty option list, if formatteropts isn't
139 139 # available, because commands.formatteropts has been available since
140 140 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
141 141 # available since 2.2 (or ae5f92e154d3)
142 142 formatteropts = getattr(cmdutil, "formatteropts",
143 143 getattr(commands, "formatteropts", []))
144 144
145 145 # for "historical portability":
146 146 # use locally defined option list, if debugrevlogopts isn't available,
147 147 # because commands.debugrevlogopts has been available since 3.7 (or
148 148 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
149 149 # since 1.9 (or a79fea6b3e77).
150 150 revlogopts = getattr(cmdutil, "debugrevlogopts",
151 151 getattr(commands, "debugrevlogopts", [
152 152 (b'c', b'changelog', False, (b'open changelog')),
153 153 (b'm', b'manifest', False, (b'open manifest')),
154 154 (b'', b'dir', False, (b'open directory manifest')),
155 155 ]))
156 156
cmdtable = {}

# for "historical portability":
# cmdutil.parsealiases has only been available since 1.5 (or
# 6252852b4332), so provide a local equivalent.
def parsealiases(cmd):
    """Split a command declaration b'name|alias1|...' into its names."""
    return cmd.split(b"|")
164 164
# for "historical portability": pick the best available way to declare
# commands, trying the newest API first.
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            entry = (func, list(options))
            if synopsis:
                entry = (func, list(options), synopsis)
            cmdtable[name] = entry
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
192 192
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    # register every perf.* knob with a dynamic default, in the same
    # order as the original one-call-per-item version
    for _name in (b'presleep', b'stub', b'parentscount', b'all-timing'):
        configitem(b'perf', _name,
                   default=mercurial.configitems.dynamicdefault,
                   )
except (ImportError, AttributeError):
    pass
212 212
def getlen(ui):
    """Return the length function, honouring the perf.stub config knob."""
    if not ui.configbool(b"perf", b"stub", False):
        return len
    # stub mode: pretend everything has length 1
    return lambda x: 1
217 217
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                # falsy, matching plainformatter's behavior
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)
    return functools.partial(_timer, fm, displayall=displayall), fm
283 283
def stub_timer(fm, func, setup=None, title=None):
    """Run 'func' exactly once (after optional 'setup'), with no timing.

    'fm' and 'title' are accepted only for signature compatibility with
    _timer.
    """
    if setup is None:
        func()
        return
    setup()
    func()
288 288
@contextlib.contextmanager
def timeone():
    """Measure a single run.

    Yields a list; when the with-block exits, a (wall, user, sys)
    triple for the enclosed code is appended to it.
    """
    result = []
    osbefore = os.times()
    clockbefore = util.timer()
    yield result
    clockafter = util.timer()
    osafter = os.times()
    result.append((clockafter - clockbefore,
                   osafter[0] - osbefore[0],
                   osafter[1] - osbefore[1]))
299 299
def _timer(fm, func, setup=None, title=None, displayall=False):
    """Repeatedly run 'func' and report the timings through 'fm'.

    'setup', when given, is called before each run but outside the
    timed section.  The result of the last 'func' call is reported as
    the benchmark result.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    while True:
        if setup is not None:
            setup()
        with timeone() as item:
            r = func()
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # stop once 3 seconds elapsed with at least 100 runs, or once 10
        # seconds elapsed with at least 3 runs
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)
320 320
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timings through formatter 'fm'.

    'timings' is a list of (wall, user, sys) triples; it is sorted in
    place so the fastest run can be reported as "best".  With
    'displayall', the max, average and median entries are reported too.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def emit(role, entry):
        # only non-"best" roles carry a "role." prefix on field names
        prefix = b'%s.' % role if role != b'best' else b''
        wall, user, system = entry[0], entry[1], entry[2]
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', wall)
        fm.write(prefix + b'comb', b' comb %f', user + system)
        fm.write(prefix + b'user', b' user %f', user)
        fm.write(prefix + b'sys', b' sys %f', system)
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    emit(b'best', timings[0])
    if displayall:
        emit(b'max', timings[-1])
        emit(b'avg', tuple([sum(col) / count for col in zip(*timings)]))
        emit(b'median', timings[len(timings) // 2])
352 352
# utilities for historical portability

def getint(ui, section, name, default):
    """Read config 'section.name' as an int, falling back to 'default'.

    for "historical portability": ui.configint has only been available
    since 1.9 (or fa2b596db182), so read the raw value and convert here.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        msg = b"%s.%s is not an integer ('%s')" % (section, name, raw)
        raise error.ConfigError(msg)
366 366
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    # capture the current value so restore() can put it back later
    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        # small handle exposing set()/restore() for the captured attribute
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
396 396
# utilities to examine each internal API changes

def getbranchmapsubsettable():
    """Locate the 'subsettable' mapping across historical module layouts.

    for "historical portability": subsettable is defined in branchmap
    since 2.9 (or 175c6fd8cacc) and in repoview since 2.5 (or
    59a9f18d4587).
    """
    for mod in (branchmap, repoview):
        table = getattr(mod, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
414 414
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store

    for "historical portability": repo.svfs has only been available
    since 2.3 (or 7034365089bf); older repos expose 'sopener' instead.
    """
    return getattr(repo, 'svfs', None) or getattr(repo, 'sopener')
425 425
def getvfs(repo):
    """Return appropriate object to access files under .hg

    for "historical portability": repo.vfs has only been available
    since 2.3 (or 7034365089bf); older repos expose 'opener' instead.
    """
    return getattr(repo, 'vfs', None) or getattr(repo, 'opener')
436 436
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API

    Tries each historical tags-cache attribute from newest to oldest and
    returns a zero-argument callable that clears it.
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
465 465
# utilities to clear cache

def clearfilecache(obj, attrname):
    """Invalidate a filecache'd attribute so the next access recomputes it."""
    # operate on the unfiltered object when obj supports unfiltered()
    if getattr(obj, 'unfiltered', None) is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
475 475
def clearchangelog(repo):
    """Force the changelog to be reloaded from disk on next access."""
    unfi = repo.unfiltered()
    if repo is not unfi:
        # also reset the filtered-repo level changelog cache slots
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(unfi, 'changelog')
481 481
# perf commands

@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the dirstate with the given patterns"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})
    def walk():
        entries = repo.dirstate.walk(matcher, subrepos=[], unknown=True,
                                     ignored=False)
        return len(list(entries))
    timer(walk)
    fm.end()

@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating a file at the working parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]
    def annotate():
        return len(fctx.annotate(True))
    timer(annotate)
    fm.end()

@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    """benchmark computing the repository status"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    unknown = opts[b'unknown']
    def status():
        return sum(map(len, repo.status(unknown=unknown)))
    timer(status)
    fm.end()
512 512
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark an addremove dry-run"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # fix: read the original quiet flag BEFORE entering the try block;
    # previously it was assigned inside the try, so any failure before
    # that assignment made the finally clause raise NameError and mask
    # the real error.
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
526 526
def clearcaches(cl):
    """Reset a changelog's lookup caches, across internal API changes."""
    if util.safehasattr(cl, b'clearcaches'):
        # modern API: the revlog clears its own caches
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # older revlogs: reset the node cache to its pristine state
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
535 535
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def s():
        # clear the caches in the (untimed) setup step so only the
        # heads computation itself is measured
        clearcaches(cl)
    def d():
        len(cl.headrevs())
    timer(d, setup=s)
    fm.end()
547 548
@command(b'perftags', formatteropts+
         [
             (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perftags(ui, repo, **opts):
    """benchmark reading the tags of a repository"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def resetcaches():
        # untimed: drop every cache that could hide the measured cost
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()
    def counttags():
        return len(repo.tags())
    timer(counttags, setup=resetcaches)
    fm.end()
566 567
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark walking every ancestor of the current heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    headrevs = repo.changelog.headrevs()
    def walkancestors():
        for rev in repo.changelog.ancestors(headrevs):
            pass
    timer(walkancestors)
    fm.end()

@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests against a lazy ancestor set"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    headrevs = repo.changelog.headrevs()
    def checkmembership():
        ancestorset = repo.changelog.ancestors(headrevs)
        for rev in revs:
            rev in ancestorset
    timer(checkmembership)
    fm.end()
590 591
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    repos = [repo, None]
    # fix: normalize kwargs to bytes keys like every other perf command;
    # without this, formatter options are looked up with str keys on
    # Python 3 and gettimer misbehaves.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # (re)open the peer in the untimed setup step
        repos[1] = hg.peer(ui, opts, path)
    def d():
        setdiscovery.findcommonheads(ui, *repos)
    timer(d, setup=s)
    fm.end()
605 606
@command(b'perfbookmarks', formatteropts +
         [
             (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def resetcaches():
        # untimed: force the bookmarks (and optionally revlogs) to reload
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')
    def loadbookmarks():
        repo._bookmarks
    timer(loadbookmarks, setup=resetcaches)
    fm.end()
624 625
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        # time opening the bundle and running 'fn' over the unpacker
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # time consuming the whole decoded payload in 'size'-byte reads
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # baseline: raw file reads without any bundle decoding
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        # time reading every part's payload in 'size'-byte reads
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # open once to sniff the bundle type and pick the matching benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
742 743
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def consumegroup():
        state, chunks = bundler._generatechangelog(cl, nodes)
        # fully drain the generator; the chunks themselves are discarded
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(consumegroup)

    fm.end()
773 774
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark building the dirstate's directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # warm up: force an initial dirstate read
    def checkdir():
        dirstate.hasdir(b'a')
        del dirstate._map._dirs
    timer(checkdir)
    fm.end()

@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark re-parsing the dirstate after invalidation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # warm up: force an initial dirstate read
    def reparse():
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(reparse)
    fm.end()

@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark rebuilding the dirstate directory structure"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # warm up: force an initial dirstate read
    def rebuilddirs():
        repo.dirstate.hasdir(b"a")
        del repo.dirstate._map._dirs
    timer(rebuilddirs)
    fm.end()

@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark rebuilding the dirstate file fold map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # warm up: force an initial dirstate read
    def rebuildfoldmap():
        dirstate._map.filefoldmap.get(b'a')
        del dirstate._map.filefoldmap
    timer(rebuildfoldmap)
    fm.end()

@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark rebuilding the dirstate directory fold map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # warm up: force an initial dirstate read
    def rebuilddirfoldmap():
        dirstate._map.dirfoldmap.get(b'a')
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(rebuilddirfoldmap)
    fm.end()

@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark writing the dirstate back to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds  # warm up: force an initial dirstate read
    def writedirstate():
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(writedirstate)
    fm.end()
844 845
@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    """benchmark the merge-action calculation against REV"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so
    # prime that cache
    wctx.dirty()
    def calculate():
        # acceptremote is True because we don't want prompts in the
        # middle of our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(calculate)
    fm.end()

@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def tracecopies():
        copies.pathcopies(ctx1, ctx2)
    timer(tracecopies)
    fm.end()

@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
          ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def computephases():
        phases = _phases
        if full:
            # also include the cost of re-reading the phase data
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(computephases)
    fm.end()
894 895
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    # NOTE(review): iteritems() is Python 2 only — confirm how this path
    # behaves on Python 3 builds
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        # the timed operation: summarizing the remote phase roots
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
950 951
@command(b'perfmanifest',[
    (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
    (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset; derive its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hex manifest node given directly
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # historical portability: getstorage() is the modern
                # manifestlog API; older versions expose _revlog
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        # clear in-memory (and optionally on-disk) caches so every run
        # measures a cold read
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
986 987
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark parsing a single changelog entry"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()
    def readentry():
        repo.changelog.read(node)
        #repo.changelog._cache = None
    timer(readentry)
    fm.end()
997 998
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def dropcaches():
        # force the ignore matcher to be rebuilt from scratch on each run
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def loadignore():
        dirstate._ignore

    timer(loadignore, setup=dropcaches, title=b"load")
    fm.end()
1014 1015
@command(b'perfindex', [
    (b'', b'rev', b'', b'revision to be looked up (default tip)'),
    ] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark loading the changelog index and resolving one node

    The node resolved is selected with ``--rev``; without it the tip node
    is used, as the option help documents.
    """
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    # The option default is b'', so an ``is None`` test could never be
    # true and the documented "default tip" was silently resolved through
    # revsingle() instead; treat any empty value as "use tip".
    if not opts[b'rev']:
        n = repo[b"tip"].node()
    else:
        rev = scmutil.revsingle(repo, opts[b'rev'])
        n = repo[rev].node()

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        cl = makecl(unfi)
        cl.rev(n)
    timer(d, setup=setup)
    fm.end()
1041 1042
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup cost of a bare `hg version` invocation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def spawnhg():
        if os.name == r'nt':
            # no /dev/null on Windows; neutralize HGRCPATH via the env
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
        else:
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
    timer(spawnhg)
    fm.end()
1055 1056
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark looking up the parents of many changelog nodes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodes = [repo.changelog.node(i) for i in _xrange(count)]
    def lookupparents():
        for node in nodes:
            repo.changelog.parents(node)
    timer(lookupparents)
    fm.end()
1072 1073
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of a changectx"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    def countfiles():
        len(repo[x].files())
    timer(countfiles)
    fm.end()
1082 1083
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw file list of a changelog entry"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def countrawfiles():
        # field [3] of a parsed changelog revision is the file list
        len(cl.read(x)[3])
    timer(countrawfiles)
    fm.end()
1093 1094
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision identifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def lookup():
        return len(repo.lookup(rev))
    timer(lookup)
    fm.end()
1100 1101
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
          ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark applying a long series of edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: every run replays the exact same pseudo-random edit
    # sequence, so timings are comparable across invocations
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        # a1..a2 is the replaced range in the current file; b1..b2 is the
        # replacement range in revision `rev`
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1134 1135
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specs"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # bind the function outside the timed call
    revrange = scmutil.revrange
    def resolvespecs():
        return len(revrange(repo, specs))
    timer(resolvespecs)
    fm.end()
1142 1143
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark resolving a node to its revision number on a cold revlog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    # open a fresh changelog revlog, bypassing repo-level caches
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(n)
        # clear caches AFTER the lookup so the next run starts cold again
        clearcaches(cl)
    timer(d)
    fm.end()
1156 1157
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
          ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running `hg log` (output is swallowed)"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()
    def runlog():
        commands.log(ui, repo, rev=rev, date=b'', user=b'',
                     copies=opts.get(b'rename'))
    timer(runlog)
    ui.popbuffer()
    fm.end()
1170 1171
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[rev]
            ctx.branch() # read changelog data (in addition to the index)
    timer(moonwalk)
    fm.end()
1185 1186
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
          ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render to a null ui so terminal output does not pollute the timing
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1219 1220
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
          ])
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    # with --timing two extra columns (rename count and wall time) are added
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # only merges are of interest: copy tracing runs between each merge
    # parent and their common ancestor heads
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                # machine-readable output keeps full hex; human output uses
                # the formatter's (possibly shortened) hex function
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
1295 1296
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def makeauditor():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)
    timer(makeauditor)
    fm.end()
1302 1303
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    def loadfncache():
        store.fncache._load()
    timer(loadfncache)
    fm.end()
1312 1313
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache through a (backed-up) transaction"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    # release the lock even if the timed run raises; the previous version
    # leaked the repo lock (and left the transaction open) on error
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        tr.addbackup(b'fncache')
        def d():
            # mark dirty so write() really rewrites the file on every run
            s.fncache._dirty = True
            s.fncache.write(tr)
        timer(d)
        tr.close()
    finally:
        lock.release()
    fm.end()
1329 1330
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()
    def encodeall():
        for path in store.fncache.entries:
            store.encode(path)
    timer(encodeall)
    fm.end()
1341 1342
def _bdiffworker(q, blocks, xdiff, ready, done):
    """worker loop for threaded perfbdiff runs

    Pulls text pairs from queue ``q`` and diffs them until a ``None``
    sentinel is seen, then parks on the ``ready`` condition until woken
    for the next timed run; exits once ``done`` is set.
    """
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            # same three diff flavors as the unthreaded path in perfbdiff
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()
1357 1358
def _manifestrevision(repo, mnode):
    """return the raw manifest text for node ``mnode``

    Works across Mercurial versions: prefers the modern manifestlog
    ``getstorage`` API, falling back to the private ``_revlog`` attribute
    on older releases.
    """
    ml = repo.manifestlog
    if util.safehasattr(ml, b'getstorage'):
        return ml.getstorage(b'').revision(mnode)
    return ml._revlog.revision(mnode)
1367 1368
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with -c/-m the positional FILE argument is actually the revision
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # gather all (old, new) text pairs up front so only diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        # start the workers; they drain the initial None sentinels and
        # park on `ready` until a timed run feeds them pairs
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # tell the workers to exit and wake any that are parked
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1468 1469
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with -c/-m the positional FILE argument is actually the revision
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # gather all (old, new) text pairs up front so only diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1534 1535
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    flagnames = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    # time a working-dir diff under each whitespace-option combination
    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        diffargs = {flagnames[flag]: b'1' for flag in diffopt}
        def rundiff():
            ui.pushbuffer()
            commands.diff(ui, repo, **diffargs)
            ui.popbuffer()
        label = diffopt.encode('ascii')
        title = b'diffopts: %s' % (label and (b'-' + label) or b'none')
        timer(rundiff, title=title)
    fm.end()
1556 1557
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # first 4 bytes: flags in the high 16 bits, format version in the low 16
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    # sample nodes at fixed positions through the revlog
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1674 1675
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start counts back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        # start each timed run cold
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in _xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1716 1717
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revision tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
          ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    # each run produced a [(rev, timing), ...] list; zip them into one
    # (rev, [timing-of-run-1, timing-of-run-2, ...]) entry per revision
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display; the index must match the label
    # (the 50%% row previously used ``* 70 // 100`` and thus reported the
    # 70th percentile)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
1824 1825
1825 1826 class _faketr(object):
1826 1827 def add(s, x, y, z=None):
1827 1828 return None
1828 1829
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    """Replay revisions [startrev, stoprev] of ``orig`` into a temporary
    revlog copy, timing each ``addrawrevision`` call.

    ``source`` selects how the revision data is fed in (see
    ``_getrevisionseed``).  Returns a list of ``(rev, timing)`` pairs, where
    ``timing`` is the value captured by ``timeone``.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            def updateprogress(pos):
                progress.update(pos)
            def completeprogress():
                progress.complete()
        else:
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # clear both the C index caches and the python-level ones so
                # every write starts from a cold state
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
1865 1866
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``(args, kwargs)`` needed to re-add revision ``rev`` of
    ``orig`` through ``addrawrevision``.

    ``source`` selects the input flavor: the full text, a delta against a
    chosen parent, or the delta actually stored on disk.
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    text = None
    cachedelta = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second one
        parent = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(parent), orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # pick whichever parent yields the shorter delta
        parent, diff = p1, orig.revdiff(p1, rev)
        if p2 != nullid:
            otherdiff = orig.revdiff(p2, rev)
            if len(diff) > len(otherdiff):
                parent, diff = p2, otherdiff
        cachedelta = (orig.rev(parent), diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1904 1905
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a throwaway on-disk copy of revlog ``orig`` truncated right
    before revision ``truncaterev``, so those revisions can be re-added.

    The copy lives in a temporary directory that is removed on exit.  Inline
    revlogs are rejected: index and data share one file there, which the
    copy/truncate logic below does not handle.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        # index entries are fixed-size, so the cut point is a simple product
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        # propagate the original opener options (e.g. revlog feature flags)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
1951 1952
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # no explicit selection: use every engine that is available and
        # actually able to compress (probed with a dummy payload)
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # raw file handle on whichever file holds the chunk data (the index
        # file doubles as data storage for inline revlogs)
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        # one segment read per revision, no reused file handle
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # same as doread, but reusing a single file handle
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # a single segment read covering the whole revision span
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        # read + decompress one chunk per revision
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    # mutable cell: dochunkbatch stores its decompressed chunks here so the
    # compression benchmarks below have realistic input to recompress
    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2069 2070
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the positional argument is actually the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # slice each read segment back into per-revision raw chunks using
        # zero-copy buffers
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # skip over the index entries interleaved with the data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    # each do* function below times one phase; caches are cleared between
    # runs unless --cache was requested

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        # older Mercurial kept the helper on the revlog module
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute the intermediate inputs each phase benchmark needs
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2205 2206
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building the
    volatile revisions set cache on the revset execution. Volatile caches hold
    the filtered and obsolescence related data."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            # materialize a full changectx for every matched revision
            for ctx in repo.set(expr): pass
        else:
            for r in repo.revs(expr): pass
    timer(d)
    fm.end()
2228 2229
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence.
    When ``names`` are given, only the matching sets/filters are timed."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def getobs(name):
        # benchmark closure for the obsolescence set ``name``
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)
        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        # benchmark closure for the repoview filter ``name``
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)
        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()
2270 2271
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
          ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap so the full build is timed
                view._branchcaches.clear()
            else:
                # only drop this view's cache; subsets stay warm
                view._branchcaches.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    # order the filters so each one comes after the subset it builds upon;
    # the for/else only fires when the remaining filters form a cycle
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap read/write so only the in-memory computation
    # is measured; restored in the finally block below
    branchcacheread = safeattrsetter(branchmap, b'read')
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcacheread.set(lambda repo: None)
    branchcachewrite.set(lambda bc, repo: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2339 2340
@command(b'perfbranchmapupdate', [
     (b'', b'base', [], b'subset of revision to start from'),
     (b'', b'target', [], b'subset of revision to end with'),
     (b'', b'clear-caches', False, b'clear cache between each runs')
    ] + formatteropts)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None] # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # register temporary filters exposing the base and target subsets
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset where found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # start every run from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2443 2444
@command(b'perfbranchmapload', [
     (b'f', b'filter', b'', b'Specify repoview filter'),
     # fix: help text previously read "brachmap"
     (b'', b'list', False, b'List branchmap filter caches'),
     (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # --list only reports which branchmap caches exist on disk
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap() # make sure we have a relevant, up to date branchmap

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while branchmap.read(repo) is None:
        # fall back to the subset this filter builds upon, if any
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        branchmap.read(repo)
    timer(bench, setup=setup)
    fm.end()
2488 2489
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)

    def loadmarkers():
        # instantiating the obsstore parses every on-disk marker
        return len(obsolete.obsstore(svfs))

    timer(loadmarkers)
    fm.end()
2498 2499
@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
     (b'', b'mincost', 0, b'smallest cost of items in cache'),
     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
     (b'', b'size', 4, b'size of cache'),
     (b'', b'gets', 10000, b'number of key lookups'),
     (b'', b'sets', 10000, b'number of key sets'),
     (b'', b'mixed', 10000, b'number of mixed mode operations'),
     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    """benchmark util.lrucachedict operations

    Times cache creation, raw lookups, insertions (with and without a cost
    limit) and a randomized mix of gets and sets.
    """
    opts = _byteskwargs(opts)

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        # NOTE: closes over ``costs``, which is only assigned further down;
        # the list exists by the time this benchmark actually runs
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        # (operation, key, cost) triples; op 0 is a get, op 1 a set
        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # cost-aware and cost-less benchmarks are mutually exclusive
    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2629 2630
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def write():
        # 100k identical writes; the payload string stays constant on purpose
        for _i in range(100000):
            ui.write(b'Testing write performance\n')

    timer(write)
    fm.end()
2642 2643
def uisetup(ui):
    """extension setup hook

    On Mercurial versions that have ``cmdutil.openrevlog`` but lack
    ``commands.debugrevlogopts``, wrap ``openrevlog`` so a ``--dir`` request
    fails with an explicit message instead of misbehaving.
    """
    if (util.safehasattr(cmdutil, b'openrevlog') and
        not util.safehasattr(commands, b'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(b"This version doesn't support --dir option",
                                  hint=b"use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2657 2658
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def bench():
        # drive one progress bar through ``total`` single-step increments
        with ui.makeprogress(topic, total=total) as progress:
            for _i in pycompat.xrange(total):
                progress.increment()

    timer(bench)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now