##// END OF EJS Templates
perfrevlogwrite: use progress helper on modern hg...
Martin von Zweigbergk -
r41191:f36fd52d default
parent child Browse files
Show More
@@ -1,2662 +1,2675
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance'''
3 3
4 4 # "historical portability" policy of perf.py:
5 5 #
6 6 # We have to do:
7 7 # - make perf.py "loadable" with as wide Mercurial version as possible
8 8 # This doesn't mean that perf commands work correctly with that Mercurial.
9 9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
10 10 # - make historical perf command work correctly with as wide Mercurial
11 11 # version as possible
12 12 #
13 13 # We have to do, if possible with reasonable cost:
14 14 # - make recent perf command for historical feature work correctly
15 15 # with early Mercurial
16 16 #
17 17 # We don't have to do:
18 18 # - make perf command for recent feature work correctly with early
19 19 # Mercurial
20 20
21 21 from __future__ import absolute_import
22 22 import contextlib
23 23 import functools
24 24 import gc
25 25 import os
26 26 import random
27 27 import shutil
28 28 import struct
29 29 import sys
30 30 import tempfile
31 31 import threading
32 32 import time
33 33 from mercurial import (
34 34 changegroup,
35 35 cmdutil,
36 36 commands,
37 37 copies,
38 38 error,
39 39 extensions,
40 40 hg,
41 41 mdiff,
42 42 merge,
43 43 revlog,
44 44 util,
45 45 )
46 46
47 47 # for "historical portability":
48 48 # try to import modules separately (in dict order), and ignore
49 49 # failure, because these aren't available with early Mercurial
50 50 try:
51 51 from mercurial import branchmap # since 2.5 (or bcee63733aad)
52 52 except ImportError:
53 53 pass
54 54 try:
55 55 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
56 56 except ImportError:
57 57 pass
58 58 try:
59 59 from mercurial import registrar # since 3.7 (or 37d50250b696)
60 60 dir(registrar) # forcibly load it
61 61 except ImportError:
62 62 registrar = None
63 63 try:
64 64 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
65 65 except ImportError:
66 66 pass
67 67 try:
68 68 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
69 69 except ImportError:
70 70 pass
71 71 try:
72 72 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
73 73 except ImportError:
74 74 pass
75 75
76 76
def identity(a):
    """Return *a* unchanged (no-op stand-in for missing converters)."""
    return a
79 79
# Pull py2/py3 helpers out of pycompat when this Mercurial is new enough;
# otherwise fall back to py2-only equivalents (no py3 support then).
try:
    from mercurial import pycompat
    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
    _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (ImportError, AttributeError):
    # pycompat is missing or too old: assume Python 2 semantics
    import inspect
    getargspec = inspect.getargspec
    _byteskwargs = identity
    fsencode = identity # no py3 support
    _maxint = sys.maxint # no py3 support
    _sysstr = lambda x: x # no py3 support
    _xrange = xrange
99 99
# locate a usable Queue class across pycompat API generations
try:
    # 4.7+
    queue = pycompat.queue.Queue
except (AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (AttributeError, ImportError):
        # pre-pycompat fallback
        queue = util.queue

# locate the log templater factory across its two historical homes
try:
    from mercurial import logcmdutil
    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        # older spelling, hosted on cmdutil
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        # too old: the feature simply isn't available
        makelogtemplater = None
118 118
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # unique sentinel so a stored None still counts as "present"
def safehasattr(thing, attr):
    # 'attr' arrives as bytes; getattr needs a native str on py3
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)
126 126
127 127 # for "historical portability":
128 128 # define util.timer forcibly, because util.timer has been available
129 129 # since ae5d60bb70c9
# Select the best clock available.  time.perf_counter exists on py3 (and
# is the preferred monotonic benchmark clock); otherwise use the
# platform-appropriate py2 clock.
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == r'nt':
    # os.name is a native str ('posix'/'nt'); comparing it against a
    # bytes literal would never match on Python 3, so use a native
    # string here.
    util.timer = time.clock
else:
    util.timer = time.time
136 136
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
    (b'c', b'changelog', False, (b'open changelog')),
    (b'm', b'manifest', False, (b'open manifest')),
    (b'', b'dir', False, (b'open directory manifest')),
    ]))
156 156
cmdtable = {}

# for "historical portability":
# cmdutil.parsealiases only appeared in 1.5 (or 6252852b4332), so keep
# a local definition of the equivalent helper.
def parsealiases(cmd):
    """Split a command spec b'name|alias1|...' into its name components."""
    names = cmd.split(b"|")
    return names
164 164
# Pick whichever @command decorator this Mercurial provides, newest
# API first, shimming in the features older ones lack.
if safehasattr(registrar, 'command'):
    # modern home of the decorator (3.7+)
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
192 192
# Declare the config options this extension reads; registrar.configitem
# only exists on 4.3+, so swallow the failure on older versions (the
# options then simply stay undeclared).
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    pass
212 212
def getlen(ui):
    """Return a length function; a constant-1 stub when perf.stub is set."""
    if not ui.configbool(b"perf", b"stub", False):
        return len
    def stublen(x):
        return 1
    return stublen
217 217
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                # always falsy, like plainformatter
                return False
            __bool__ = __nonzero__  # py3 spelling of __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)
    return functools.partial(_timer, fm, displayall=displayall), fm
283 283
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* once, without timing it.

    Drop-in replacement for _timer() used when perf.stub is set; *fm*
    and *title* are accepted only for interface compatibility.  When
    given, *setup* runs first.
    """
    if setup:
        setup()
    func()
288 288
@contextlib.contextmanager
def timeone():
    """Measure one run; yields a list that receives a single sample.

    On exit, the yielded list contains one (wallclock, user-cpu,
    sys-cpu) tuple covering the with-block.
    """
    sample = []
    osbefore = os.times()
    wallbefore = util.timer()
    yield sample
    wallafter = util.timer()
    osafter = os.times()
    sample.append((wallafter - wallbefore,
                   osafter[0] - osbefore[0],
                   osafter[1] - osbefore[1]))
299 299
def _timer(fm, func, setup=None, title=None, displayall=False):
    """Repeatedly time func() and report the samples through fm.

    setup() (when given) runs before every timed call and is excluded
    from the measurement.  The loop stops once (elapsed > 3s and
    count >= 100) or (elapsed > 10s and count >= 3).  The last call's
    return value is forwarded to formatone() as the displayed result.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    while True:
        if setup is not None:
            setup()
        with timeone() as item:
            r = func()
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # two exit conditions: many fast runs, or a few slow ones
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)
320 320
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timing samples through formatter fm.

    timings is a list of (wall, user-cpu, sys-cpu) tuples.  The best
    (smallest) sample is always reported; with displayall, max/avg/
    median are reported too.  Note: sorts `timings` in place.
    """

    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)
    def display(role, entry):
        # entry is one (wall, user, sys) tuple; non-'best' roles get a
        # "role." prefix on their field names
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')
    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
352 352
353 353 # utilities for historical portability
354 354
def getint(ui, section, name, default):
    """Read config option section.name as an int, or *default* when unset.

    for "historical portability": ui.configint has only been available
    since 1.9 (or fa2b596db182), so parse the raw value ourselves.
    Raises error.ConfigError when the value is set but not an integer.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, raw))
366 366
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    # 'name' is bytes; convert for getattr/setattr on py3
    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        # assign a new value to the attribute
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        # put the value captured at safeattrsetter() time back
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
396 396
397 397 # utilities to examine each internal API changes
398 398
def getbranchmapsubsettable():
    """Return the 'subsettable' mapping used by branchmap caching."""
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    for mod in (branchmap, repoview):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
414 414
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store

    for "historical portability": repo.svfs has only existed since 2.3
    (or 7034365089bf); older repos expose repo.sopener instead.
    """
    storevfs = getattr(repo, 'svfs', None)
    if not storevfs:
        return getattr(repo, 'sopener')
    return storevfs
425 425
def getvfs(repo):
    """Return appropriate object to access files under .hg

    for "historical portability": repo.vfs has only existed since 2.3
    (or 7034365089bf); older repos expose repo.opener instead.
    """
    hgvfs = getattr(repo, 'vfs', None)
    if not hgvfs:
        return getattr(repo, 'opener')
    return hgvfs
436 436
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API

    The returned callable takes no arguments.
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
465 465
466 466 # utilities to clear cache
467 467
def clearfilecache(obj, attrname):
    """Drop *attrname* from *obj*'s @filecache-style caches.

    When *obj* is a (possibly filtered) repository, operates on the
    unfiltered repo.  Removes both the cached instance attribute and
    its _filecache entry so the next access recomputes the value.
    """
    getunfiltered = getattr(obj, 'unfiltered', None)
    if getunfiltered is not None:
        obj = getunfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
475 475
def clearchangelog(repo):
    # a filtered repo view keeps its own changelog cache attributes;
    # reset those directly (bypassing any property machinery) before
    # clearing the filecache entry on the unfiltered repo
    if repo is not repo.unfiltered():
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
481 481
482 482 # perf commands
483 483
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the dirstate with the given patterns"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})
    def walkonce():
        return len(list(repo.dirstate.walk(matcher, subrepos=[],
                                           unknown=True, ignored=False)))
    timer(walkonce)
    fm.end()
492 492
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating a file at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]
    def annotateonce():
        return len(fctx.annotate(True))
    timer(annotateonce)
    fm.end()
500 500
@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    """benchmark computing the working copy status"""
    opts = _byteskwargs(opts)
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    # the result shown is the total number of reported files
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
512 512
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the working copy"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # read the original value before entering the try block: if this
    # lookup ran inside "try" and failed, the "finally" clause would
    # raise a NameError on 'oldquiet' and mask the real error
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
526 526
def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        # modern revlogs expose an explicit cache-clearing method
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # older revlogs: reset the node->rev cache to its initial state
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
535 535
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark head revision computation on the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def headsonce():
        len(cl.headrevs())
        clearcaches(cl)
    timer(headsonce)
    fm.end()
546 546
@command(b'perftags', formatteropts+
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perftags(ui, repo, **opts):
    """benchmark computing the repository's tags"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def setupone():
        # always drop the tags cache; optionally drop the revlogs too
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()
    timer(lambda: len(repo.tags()), setup=setupone)
    fm.end()
565 565
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating over all ancestors of the current heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def iterancestors():
        for _rev in repo.changelog.ancestors(heads):
            pass
    timer(iterancestors)
    fm.end()
576 576
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests against a lazy ancestor set"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def testmembership():
        ancs = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in ancs
    timer(testmembership)
    fm.end()
589 589
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # every other perf command byteifies its **opts; without this the
    # dict keeps native-str keys on Python 3 when consulted below
    opts = _byteskwargs(opts)
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # reconnect for every run so connection setup isn't amortized away
        repos[1] = hg.peer(ui, opts, path)
    def d():
        setdiscovery.findcommonheads(ui, *repos)
    timer(d, setup=s)
    fm.end()
604 604
@command(b'perfbookmarks', formatteropts +
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def setupone():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')
    def loadonce():
        repo._bookmarks
    timer(loadonce, setup=setupone)
    fm.end()
623 623
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # benchmark a full parse of the bundle via fn(bundle)
    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    # benchmark draining the unpacked bundle stream in `size`-byte chunks
    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    # benchmark raw file reads in `size`-byte chunks (baseline I/O cost)
    def makestdioread(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    # benchmark reading every part's payload in `size`-byte chunks
    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        # add the benchmarks matching the detected bundle format
        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
741 741
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # drain the chunk generator; the chunks themselves are discarded
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
772 772
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark rebuilding the dirstate's directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # load the dirstate outside the timed section
    def rebuilddirs():
        dirstate.hasdir(b'a')
        del dirstate._map._dirs
    timer(rebuilddirs)
    fm.end()
784 784
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark parsing the dirstate from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # warm it up once before timing
    def reload():
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(reload)
    fm.end()
795 795
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark hasdir() with the directory map rebuilt each run"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # make sure the dirstate itself is loaded
    def querydirs():
        repo.dirstate.hasdir(b"a")
        del repo.dirstate._map._dirs
    timer(querydirs)
    fm.end()
806 806
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark building the dirstate's case-folding file map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # load the dirstate outside the timed section
    def rebuildfoldmap():
        dirstate._map.filefoldmap.get(b'a')
        del dirstate._map.filefoldmap
    timer(rebuildfoldmap)
    fm.end()
818 818
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark building the dirstate's case-folding directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # load the dirstate outside the timed section
    def rebuildmaps():
        dirstate._map.dirfoldmap.get(b'a')
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(rebuildmaps)
    fm.end()
831 831
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark writing the dirstate back out to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds  # ensure the dirstate is loaded before timing
    def flush():
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(flush)
    fm.end()
843 843
@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    """benchmark merge action calculation (no actual merge is performed)"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()
862 862
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def tracecopies():
        # result deliberately discarded: only the runtime is of interest
        copies.pathcopies(ctx1, ctx2)
    timer(tracecopies)
    fm.end()
874 874
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
          ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def recompute():
        phases = _phases
        if full:
            # also measure re-reading the phase roots from disk
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(recompute)
    fm.end()
893 893
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = (path.branch, opts.get(b'branch') or [])
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    # fetch the remote phase roots once, outside the timed section
    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        # count only locally-known roots with a non-public phase value
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
951 951
@command(b'perfmanifest',[
    (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
    (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hex node given directly
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # attribute names must be native strings: getattr() rejects
                # bytes on Python 3, so safehasattr() gets r'' rather than b''
                if util.safehasattr(repo.manifestlog, r'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')

    def d():
        # drop all manifest caches so each run reads from disk
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
987 987
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading a single changelog entry"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def d():
        repo.changelog.read(node)
        #repo.changelog._cache = None

    timer(d)
    fm.end()
998 998
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def preparerun():
        # drop any cached ignore matcher so every run loads it afresh
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def run():
        dirstate._ignore

    timer(run, setup=preparerun, title=b"load")
    fm.end()
1015 1015
@command(b'perfindex', [
         (b'', b'rev', b'', b'revision to be looked up (default tip)'),
        ] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation plus a single node lookup"""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    if opts[b'rev'] is None:
        node = repo[b"tip"].node()
    else:
        node = repo[scmutil.revsingle(repo, opts[b'rev'])].node()

    unfi = repo.unfiltered()
    # grab the filecache function directly so the benchmark does not
    # measure the filecache machinery itself
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        cl.rev(node)

    timer(d, setup=setup)
    fm.end()
1042 1042
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup cost of a bare `hg version` invocation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        if os.name == r'nt':
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
        else:
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))

    timer(d)
    fm.end()
1056 1056
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark resolving the parents of many changelog nodes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodes = [repo.changelog.node(i) for i in _xrange(count)]

    def d():
        for node in nodes:
            repo.changelog.parents(node)

    timer(d)
    fm.end()
1073 1073
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of a changeset"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)

    def d():
        len(repo[rev].files())

    timer(d)
    fm.end()
1083 1083
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw files list of a changeset"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def d():
        # entry 3 of a parsed changelog record is the files list
        len(cl.read(rev)[3])

    timer(d)
    fm.end()
1094 1094
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        return len(repo.lookup(rev))

    timer(d)
    fm.end()
1101 1101
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
         ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark applying a series of random edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: the edit script is deterministic, so runs are comparable
    random.seed(0)
    randint = random.randint
    nlines = 0
    editscript = []
    for rev in _xrange(edits):
        a1 = randint(0, nlines)
        a2 = randint(a1, min(nlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        nlines += (b2 - b1) - (a2 - a1)
        editscript.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in editscript:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1135 1135
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange

    def d():
        return len(revrange(repo, specs))

    timer(d)
    fm.end()
1143 1143
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark looking a node up in a freshly loaded changelog index"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    node = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")

    def d():
        cl.rev(node)
        # drop the caches so the next iteration looks the node up again
        clearcaches(cl)

    timer(d)
    fm.end()
1157 1157
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running `hg log` (output is buffered and discarded)"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()

    def d():
        commands.log(ui, repo, rev=rev, date=b'', user=b'',
                     copies=opts.get(b'rename'))

    timer(d)
    ui.popbuffer()
    fm.end()
1171 1171
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[rev]
            ctx.branch() # read changelog data (in addition to the index)

    timer(moonwalk)
    fm.end()
1186 1186
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render through a ui whose output goes to os.devnull, so write cost is
    # constant and the pager cannot interfere
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        # render every selected revision once per timed run
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1220 1220
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
         ])
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perftracecopies`

    This command finds source-destination pairs relevant for copytracing
    testing. It reports values for some of the parameters that impact copy
    tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    # with --timing two extra columns (rename count and elapsed time) are
    # added to both the header and the per-pair output line
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # only merge revisions are of interest: they have two parents to trace
    # copies between
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # nothing to trace for this base/parent pair
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                # the plain-text output uses short hashes, the formatter
                # data keeps the full hex
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
1296 1296
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark creating a case-collision auditor"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(d)
    fm.end()
1303 1303
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    def d():
        store.fncache._load()

    timer(d)
    fm.end()
1313 1313
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache

    The write runs inside a transaction with the fncache backed up, and
    the lock/transaction are now released even if timing fails (the
    original code leaked both on error).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        try:
            # back the fncache up so the repeated writes are rolled back
            tr.addbackup(b'fncache')

            def d():
                # mark dirty so write() actually rewrites the file
                s.fncache._dirty = True
                s.fncache.write(tr)

            timer(d)
            tr.close()
        finally:
            tr.release()
    finally:
        lock.release()
    fm.end()
1330 1330
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    # load once outside the timed closure so only encoding is measured
    store.fncache._load()

    def d():
        for path in store.fncache.entries:
            store.encode(path)

    timer(d)
    fm.end()
1342 1342
def _bdiffworker(q, blocks, xdiff, ready, done):
    # Worker loop for the threaded variant of perfbdiff: pull (left, right)
    # text pairs off the queue and diff them until `done` is set.  A None
    # item marks the end of a batch; the worker then parks in ready.wait()
    # until the driver wakes it for the next timed run.
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()
1358 1358
def _manifestrevision(repo, mnode):
    """Return the raw manifest revision text for manifest node ``mnode``.

    Uses the modern ``getstorage`` API when available and falls back to
    the older ``_revlog`` attribute on historical Mercurial versions.
    """
    ml = repo.manifestlog

    # attribute names must be native strings: getattr() rejects bytes on
    # Python 3, so safehasattr() is given r'' rather than b''
    if util.safehasattr(ml, r'getstorage'):
        store = ml.getstorage(b'')
    else:
        store = ml._revlog

    return store.revision(mnode)
1368 1368
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with -c/-m the positional argument is the revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # collect all (old, new) text pairs up front so only diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        # pre-load one None per worker; each worker consumes it and parks in
        # ready.wait(), so the q.join() below returns once all threads are
        # idle and the timed run starts from a quiescent state
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            # feed one batch, terminated by one None sentinel per worker,
            # then wake all workers and wait for the batch to drain
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # signal shutdown and wake the workers once more so they can
        # observe `done` and exit
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1469 1469
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with -c/-m the positional argument is the revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # collect all (left, right) text pairs up front so only diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1535 1535
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # map of single-letter flag -> commands.diff() keyword argument
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    # time the diff once per whitespace-option combination; `opts` is
    # deliberately rebound to the diff keyword arguments for each run
    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = dict((options[c], b'1') for c in diffopt)
        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()
        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()
1557 1557
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # first 4 bytes: version in the low 16 bits, flags above; bit 16 marks
    # inline data
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    # sample nodes at fixed fractions of the revlog for the lookup benches
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    # (callable, title) pairs; each one is timed independently below
    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1675 1675
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        # with --reverse, walk from the end toward startrev with a
        # negative stride
        if reverse:
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in _xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1717 1717
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          (b'', b'count', 3, b'last revision to write'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
         ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    # each entry becomes (rev, [timing-from-run-1, timing-from-run-2, ...])
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # fixed: the 50th percentile was previously computed with
        # `resultcount * 70 // 100`, reporting the 70th percentile instead
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
1825 1825
1826 1826 class _faketr(object):
1827 1827 def add(s, x, y, z=None):
1828 1828 return None
1829 1829
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    """Rewrite revisions [startrev, stoprev] of `orig` into a temporary
    revlog, timing each addrawrevision() call.

    Returns a list of (rev, timing-tuple) pairs, one per rewritten
    revision. `source` selects how the revision payload is seeded (see
    _getrevisionseed). `runidx`, if given, is only used to label the
    progress topic.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API: modern hg exposes
        # ui.makeprogress(); older versions only have ui.progress().
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            def updateprogress(pos):
                progress.update(pos)
            def completeprogress():
                progress.complete()
        else:
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                # passing None as the position closes the progress bar
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            # build the arguments outside the timed section so only the
            # actual write is measured
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
1853 1866
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) pair to feed addrawrevision() so that it
    reproduces revision `rev` of revlog `orig`.

    `source` picks where the payload comes from: b'full' supplies the
    fulltext, while the b'parent-*' and b'storage' variants supply a
    cached delta against the chosen base revision.
    """
    from mercurial.node import nullid

    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    linkrev = orig.linkrev(rev)
    flags = orig.flags(rev)
    text = None
    cachedelta = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to p1 when there is no second parent
        parent = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(parent), orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # keep whichever parent produces the shorter delta
        parent = p1
        diff = orig.revdiff(p1, rev)
        if p2 != nullid:
            otherdiff = orig.revdiff(p2, rev)
            if len(otherdiff) < len(diff):
                parent = p2
                diff = otherdiff
        cachedelta = (orig.rev(parent), diff)
    elif source == b'storage':
        # reuse the delta base the original revlog stored on disk
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1892 1905
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Context manager yielding a writable copy of revlog `orig`,
    truncated so revisions >= `truncaterev` can be re-added.

    The index and data files are copied into a temporary directory,
    truncated, and reopened as a fresh revlog. The directory is removed
    on exit. Inline revlogs are refused (truncation offsets below assume
    separate .i/.d files).
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        # 'ab' keeps existing bytes; seek(0) then truncate() cuts the file
        # at the computed offset without rewriting the retained prefix
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # each index entry is orig._io.size bytes
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        # best-effort cleanup (ignore_errors=True)
        shutil.rmtree(tmpdir, True)
1939 1952
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # no explicit list: keep every engine that is available and can
        # actually compress (probe with a dummy payload)
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # open a raw file handle on the revlog's backing file
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # same as doread but reusing one file descriptor across reads
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        # chunks[0] was populated by dochunkbatch, which always runs first
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2057 2070
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the positional argument is the revision, not a file
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # slice each read segment into per-revision buffers (zero-copy
        # via util.buffer)
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved between modules across hg versions
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2193 2206
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clean option if need to evaluate the impact of build volatile
    revisions set cache on the revset execution. Volatile cache hold filtered
    and obsolete related cache."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            repo.invalidatevolatilesets()
        # repo.set yields changectx objects, repo.revs bare revnums;
        # either way we exhaust the iterator to force full evaluation
        walker = repo.set if contexts else repo.revs
        for _unused in walker(expr):
            pass

    timer(d)
    fm.end()
2216 2229
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def getobs(name):
        # build a benchmark closure for one obsolescence set
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)
        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        # restrict to the sets explicitly requested on the command line
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        # build a benchmark closure for one repoview filter
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)
        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()
2258 2271
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
          ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap: measures a from-scratch build
                view._branchcaches.clear()
            else:
                # drop only this view's entry: measures incremental update
                view._branchcaches.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset has already been scheduled (or has
        # none); the for/else fires only if no such filter exists, which
        # would mean a cycle in the subset graph
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap reads/writes so only in-memory
    # computation is timed; restored in the finally block below
    branchcacheread = safeattrsetter(branchmap, b'read')
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcacheread.set(lambda repo: None)
    branchcachewrite.set(lambda bc, repo: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2327 2340
@command(b'perfbranchmapupdate', [
    (b'', b'base', [], b'subset of revision to start from'),
    (b'', b'target', [], b'subset of revision to end with'),
    (b'', b'clear-caches', False, b'clear cache between each runs')
] + formatteropts)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    $ update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the revisions the timed update will have to add
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    # install two ad-hoc repoview filters; the finally block removes them
    try:
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # while/else: no suitable cached subset was found, build the
            # base branchmap from scratch
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # each run starts from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2431 2444
@command(b'perfbranchmapload', [
    (b'f', b'filter', b'', b'Specify repoview filter'),
    (b'', b'list', False, b'List brachmap filter caches'),
    (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),

] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    # NOTE: the `filter` and `list` parameters shadow builtins; kept as-is
    # because they define the command's CLI option names.
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # --list: just enumerate on-disk branchmap cache files and exit
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    currentfilter = filter
    # try once without timer, the filter may not be cached
    # (walk up the subset chain until a cached branchmap is found)
    while branchmap.read(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        branchmap.read(repo)
    timer(bench, setup=setup)
    fm.end()
2476 2489
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)
    # instantiating obsstore parses every on-disk marker; len() forces it
    def countmarkers():
        return len(obsolete.obsstore(svfs))
    timer(countmarkers)
    fm.end()
2486 2499
@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
     (b'', b'mincost', 0, b'smallest cost of items in cache'),
     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
     (b'', b'size', 4, b'size of cache'),
     (b'', b'gets', 10000, b'number of key lookups'),
     (b'', b'sets', 10000, b'number of key sets'),
     (b'', b'mixed', 10000, b'number of mixed mode operations'),
     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    """benchmark util.lrucachedict operations

    Times construction, gets, inserts/sets and a randomized get/set mix.
    When --costlimit is non-zero, the cost-aware variants are benchmarked
    instead of the plain ones.
    """
    opts = _byteskwargs(opts)

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        # `costs` is assigned below; fine because this closure only runs
        # after the whole function body has executed
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2617 2630
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # hoist the constant line; 100k writes exercise the output path
    line = b'Testing write performance\n'

    def write():
        for _ in range(100000):
            ui.write(line)

    timer(write)
    fm.end()
2630 2643
def uisetup(ui):
    """Extension hook: patch cmdutil.openrevlog on old Mercurial versions.

    On hg versions that have openrevlog but not debugrevlogopts, the
    --dir option is not supported; wrap openrevlog to fail loudly
    instead of misbehaving.
    """
    if (util.safehasattr(cmdutil, b'openrevlog') and
        not util.safehasattr(commands, b'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(b"This version doesn't support --dir option",
                                  hint=b"use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2645 2658
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def doprogress():
        # drive one complete bar from 0 to `total`, one step per tick
        with ui.makeprogress(topic, total=total) as progress:
            for _step in pycompat.xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now