##// END OF EJS Templates
perf: add two more missing b prefixes for Python 3...
Augie Fackler -
r40984:a314eafd default
parent child Browse files
Show More
@@ -1,2653 +1,2653 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance'''
3 3
4 4 # "historical portability" policy of perf.py:
5 5 #
6 6 # We have to do:
7 7 # - make perf.py "loadable" with as wide Mercurial version as possible
8 8 # This doesn't mean that perf commands work correctly with that Mercurial.
9 9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
10 10 # - make historical perf command work correctly with as wide Mercurial
11 11 # version as possible
12 12 #
13 13 # We have to do, if possible with reasonable cost:
14 14 # - make recent perf command for historical feature work correctly
15 15 # with early Mercurial
16 16 #
17 17 # We don't have to do:
18 18 # - make perf command for recent feature work correctly with early
19 19 # Mercurial
20 20
21 21 from __future__ import absolute_import
22 22 import contextlib
23 23 import functools
24 24 import gc
25 25 import os
26 26 import random
27 27 import shutil
28 28 import struct
29 29 import sys
30 30 import tempfile
31 31 import threading
32 32 import time
33 33 from mercurial import (
34 34 changegroup,
35 35 cmdutil,
36 36 commands,
37 37 copies,
38 38 error,
39 39 extensions,
40 40 hg,
41 41 mdiff,
42 42 merge,
43 43 revlog,
44 44 util,
45 45 )
46 46
47 47 # for "historical portability":
48 48 # try to import modules separately (in dict order), and ignore
49 49 # failure, because these aren't available with early Mercurial
50 50 try:
51 51 from mercurial import branchmap # since 2.5 (or bcee63733aad)
52 52 except ImportError:
53 53 pass
54 54 try:
55 55 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
56 56 except ImportError:
57 57 pass
58 58 try:
59 59 from mercurial import registrar # since 3.7 (or 37d50250b696)
60 60 dir(registrar) # forcibly load it
61 61 except ImportError:
62 62 registrar = None
63 63 try:
64 64 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
65 65 except ImportError:
66 66 pass
67 67 try:
68 68 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
69 69 except ImportError:
70 70 pass
71 71 try:
72 72 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
73 73 except ImportError:
74 74 pass
75 75
76 76
def identity(a):
    """Return *a* unchanged; no-op fallback for missing pycompat helpers."""
    return a
79 79
# for "historical portability":
# alias pycompat helpers locally, falling back to Python-2-only
# equivalents when the hosting Mercurial is too old to provide them
# (the except branch can therefore only work under Python 2)
try:
    from mercurial import pycompat
    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
    _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (ImportError, AttributeError):
    import inspect
    getargspec = inspect.getargspec
    _byteskwargs = identity
    fsencode = identity # no py3 support
    _maxint = sys.maxint # no py3 support
    _sysstr = lambda x: x # no py3 support
    _xrange = xrange
99 99
# for "historical portability":
# find a usable Queue class across the py2/py3 module rename and
# across Mercurial versions.
# NOTE(review): if the pycompat import above failed entirely, the first
# lookup below raises an uncaught NameError; presumably versions that
# old are no longer exercised — confirm before relying on this path.
try:
    # 4.7+
    queue = pycompat.queue.Queue
except (AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (AttributeError, ImportError):
        queue = util.queue
109 109
# for "historical portability":
# maketemplater moved from cmdutil to logcmdutil; try the new home
# first, then the old one, and fall back to None when neither exists
try:
    from mercurial import logcmdutil
    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None
118 118
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # unique sentinel: distinguishes "missing" from None
def safehasattr(thing, attr):
    """Return True if *thing* has attribute *attr* (attr may be bytes)."""
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)
126 126
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    # Python 3 always takes this branch (perf_counter exists since 3.3)
    util.timer = time.perf_counter
elif os.name == b'nt':
    # this bytes comparison can only match under Python 2 (str == bytes);
    # harmless on py3 because the perf_counter branch wins above
    util.timer = time.clock
else:
    util.timer = time.time
136 136
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
# (attribute names passed to getattr() must be native str)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
        (b'c', b'changelog', False, (b'open changelog')),
        (b'm', b'manifest', False, (b'open manifest')),
        (b'', b'dir', False, (b'open directory manifest')),
        ]))
156 156
cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Return the list of names in a b"name|alias|..." command spec."""
    names = cmd.split(b"|")
    return names
164 164
# pick (or synthesize) the "@command" decorator appropriate for this
# Mercurial version; all three branches expose the same call signature
if safehasattr(registrar, 'command'):
    # modern API (3.7+): registrar-based command registration
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            # emulate norepo by appending the aliases to commands.norepo,
            # which is how old Mercurial tracked repo-less commands
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
192 192
# declare the perf.* config knobs when the registrar supports it; on
# older Mercurial the bare ui.config*(..., default) calls still work
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    pass
212 212
def getlen(ui):
    """Return len(), or a constant-1 function when perf.stub is set.

    Stub mode lets the test suite run perf commands without depending
    on the actual result sizes.
    """
    if ui.configbool(b"perf", b"stub", False):
        return lambda seq: 1
    return len
217 217
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                # formatter is falsy: callers use this to detect plain mode
                return False
            __bool__ = __nonzero__  # py3 spelling of __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)
    return functools.partial(_timer, fm, displayall=displayall), fm
283 283
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* exactly once; drop-in replacement for _timer in
    perf.stub mode.  *fm* and *title* are accepted (and ignored) so the
    two can be used interchangeably via functools.partial.
    """
    if setup:
        setup()
    func()
288 288
@contextlib.contextmanager
def timeone():
    """Time the enclosed block, yielding a list that receives a single
    (wall, user, sys) tuple once the block exits."""
    sample = []
    times_before = os.times()
    clock_before = util.timer()
    yield sample
    clock_after = util.timer()
    times_after = os.times()
    sample.append((clock_after - clock_before,
                   times_after[0] - times_before[0],
                   times_after[1] - times_before[1]))
299 299
def _timer(fm, func, setup=None, title=None, displayall=False):
    """Repeatedly time func() and report the samples through fm.

    Keeps sampling until either 3 seconds have passed with at least 100
    runs, or 10 seconds have passed with at least 3 runs.  setup() runs
    before each sample but outside the timed section.  The return value
    of the last func() call is reported as the benchmark "result".
    """
    gc.collect()  # reduce jitter from collections triggered mid-run
    results = []
    begin = util.timer()
    count = 0
    while True:
        if setup is not None:
            setup()
        with timeone() as item:
            r = func()
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # enough samples collected quickly: stop after 3s/100 runs;
        # otherwise hard-stop around 10s once we have 3 samples
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)
320 320
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's samples through formatter fm.

    timings is a list of (wall, user, sys) tuples as produced by
    timeone().  Always shows the best (lowest) sample; with displayall
    also shows max, average and median.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)
    def display(role, entry):
        # non-"best" rows get a "role." prefix on their field names
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')
    # tuples sort by wall time first, so timings[0] is the best sample
    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
352 352
353 353 # utilities for historical portability
354 354
def getint(ui, section, name, default):
    """Read an integer config value on any Mercurial version.

    for "historical portability": ui.configint has only been available
    since 1.9 (or fa2b596db182), so parse the raw value ourselves.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, raw))
366 366
def safeattrsetter(obj, name, ignoremissing=False):
    """Return a set/restore helper bound to attribute *name* of *obj*.

    Aborts if the attribute does not exist, so that a future removal of
    an attribute cannot silently invalidate a perf measurement.  With
    ignoremissing=True, a missing attribute yields None instead of an
    abort — useful for probing attributes that only some Mercurial
    versions have.

    The returned object offers set(newvalue) and restore(); the latter
    reinstates the value the attribute had when this function ran.
    """
    if not util.safehasattr(obj, name):
        if not ignoremissing:
            raise error.Abort(
                (b"missing attribute %s of %s might break assumption"
                 b" of performance measurement") % (name, obj))
        return None

    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        # closure-based accessor pair over (obj, name, origvalue)
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
396 396
397 397 # utilities to examine each internal API changes
398 398
def getbranchmapsubsettable():
    """Locate the branchmap 'subsettable' across Mercurial versions.

    for "historical portability": subsettable is defined in
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    """
    for candidate in (branchmap, repoview):
        table = getattr(candidate, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
414 414
def getsvfs(repo):
    """Return the vfs-like object used to access files under .hg/store.

    for "historical portability": repo.svfs has been available since
    2.3 (or 7034365089bf); older repos expose 'sopener' instead.
    """
    store_vfs = getattr(repo, 'svfs', None)
    if store_vfs:
        return store_vfs
    return getattr(repo, 'sopener')
425 425
def getvfs(repo):
    """Return the vfs-like object used to access files under .hg.

    for "historical portability": repo.vfs has been available since
    2.3 (or 7034365089bf); older repos expose 'opener' instead.
    """
    repo_vfs = getattr(repo, 'vfs', None)
    if repo_vfs:
        return repo_vfs
    return getattr(repo, 'opener')
436 436
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case.
            # Use a native str key here: vars()/__dict__ keys are str on
            # Python 3, so a bytes key would never match and the cache
            # would silently never be cleared.
            if r'_tagscache' in vars(repo):
                del repo.__dict__[r'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
465 465
466 466 # utilities to clear cache
467 467
def clearfilecache(obj, attrname):
    """Drop a @filecache'd property so its next access recomputes it.

    Accepts both bytes and native str attribute names: vars() and
    _filecache keys are native str on Python 3, so a bytes name (as some
    historical callers pass) would otherwise silently match nothing and
    leave the cache intact.
    """
    if not isinstance(attrname, str):
        attrname = attrname.decode('ascii')
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        # filecached properties live on the unfiltered repo
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
475 475
def clearchangelog(repo):
    """Drop any cached changelog so the next access re-reads it."""
    unfi = repo.unfiltered()
    if repo is not unfi:
        # a filtered repo keeps its own view-level changelog cache
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(unfi, 'changelog')
481 481
482 482 # perf commands
483 483
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the dirstate under the given file patterns"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    def d():
        return len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                           ignored=False)))
    timer(d)
    fm.end()
492 492
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file f at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]
    def d():
        return len(fctx.annotate(True))
    timer(d)
    fm.end()
500 500
@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    """benchmark computing the working-copy status"""
    opts = _byteskwargs(opts)
    # the commented-out variant below measured dirstate.status directly;
    # kept for reference
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
512 512
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the whole working copy"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # read the attribute BEFORE entering the try block: if this lookup
    # were inside and raised, the finally clause would reference an
    # unbound 'oldquiet' and mask the real error with a NameError
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True  # suppress per-file output during the runs
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True  # never actually modify the dirstate
        timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
526 526
def clearcaches(cl):
    """Clear a revlog/changelog's lookup caches, whichever API it has."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # pre-clearcaches revlogs: reset the node cache to its pristine
        # state (containing only the null node)
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
535 535
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark computing the changelog head revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        len(cl.headrevs())
        # clear the caches afterwards so every run starts cold
        clearcaches(cl)
    timer(d)
    fm.end()
546 546
@command(b'perftags', formatteropts+
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perftags(ui, repo, **opts):
    """benchmark reading and computing the tags of a repository"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def s():
        # untimed setup: drop every cache that would make repo.tags() cheap
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()
    def t():
        return len(repo.tags())
    timer(t, setup=s)
    fm.end()
565 565
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating over all ancestors of the current heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        # exhausting the iterator is the measured work
        for _ancestor in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()
576 576
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests against a lazy ancestor set"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        ancestorset = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in ancestorset  # the membership check is the measured work
    timer(d)
    fm.end()
589 589
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # normalize **opts to bytes keys like every other perf command does;
    # without this, gettimer() and hg.peer() receive str keys on Python 3
    # and silently ignore the command-line options
    opts = _byteskwargs(opts)
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # untimed setup: recreate the peer each run so connection setup
        # is excluded from the measurement
        repos[1] = hg.peer(ui, opts, path)
    def d():
        setdiscovery.findcommonheads(ui, *repos)
    timer(d, setup=s)
    fm.end()
604 604
@command(b'perfbookmarks', formatteropts +
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def s():
        # untimed setup: drop caches so each run re-parses from disk
        if clearrevlogs:
            clearchangelog(repo)
        # use a native str attribute name: clearfilecache() matches it
        # against vars(repo), whose keys are str on Python 3 — a bytes
        # name would never match and the cache would silently survive
        clearfilecache(repo, '_bookmarks')
    def d():
        repo._bookmarks
    timer(d, setup=s)
    fm.end()
623 623
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # note: the open() mode must be a native str — Python 3's open()
    # raises TypeError for a bytes mode string ('rb' == b'rb' on py2)

    def makebench(fn):
        # benchmark fn() against a freshly opened bundle each run
        def run():
            with open(bundlepath, 'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # benchmark draining the bundle stream size bytes at a time
        def run():
            with open(bundlepath, 'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # baseline: raw file reads without any bundle machinery
        def run():
            with open(bundlepath, 'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        # benchmark reading every part's payload size bytes at a time
        def run():
            with open(bundlepath, 'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # open once up front to sniff the bundle type and pick the
    # format-specific benchmarks
    with open(bundlepath, 'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
741 741
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # drain the chunk generator; the generation work is what we time
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
772 772
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark computing the dirstate's directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # force the dirstate to be loaded outside the timing
    def d():
        dirstate.hasdir(b'a')
        # drop the computed directory map so the next run starts cold
        del dirstate._map._dirs
    timer(d)
    fm.end()
784 784
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark parsing the dirstate from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # prime the dirstate outside the timed section
    def d():
        repo.dirstate.invalidate()
        b"a" in repo.dirstate  # membership test forces a fresh re-read
    timer(d)
    fm.end()
795 795
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark dirstate hasdir() with a cold directory cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # make sure the dirstate itself is loaded
    def d():
        repo.dirstate.hasdir(b"a")
        del repo.dirstate._map._dirs  # force recomputation on the next run
    timer(d)
    fm.end()
806 806
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark building the dirstate's file fold map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # load the dirstate outside the timed section
    def d():
        dirstate._map.filefoldmap.get(b'a')
        del dirstate._map.filefoldmap  # rebuild from scratch each run
    timer(d)
    fm.end()
818 818
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark building the dirstate's directory fold map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # load the dirstate outside the timed section
    def d():
        dirstate._map.dirfoldmap.get(b'a')
        # drop both the fold map and the underlying directory map so
        # every run recomputes them from scratch
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(d)
    fm.end()
831 831
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark writing the dirstate back out to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds  # load the dirstate before timing
    def d():
        ds._dirty = True  # mark dirty so write() actually writes
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()
843 843
@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    """benchmark merge.calculateupdates() between the working copy and rev"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()
862 862
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    source = scmutil.revsingle(repo, rev1, rev1)
    target = scmutil.revsingle(repo, rev2, rev2)
    timer(lambda: copies.pathcopies(source, target))
    fm.end()
874 874
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
          ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def d():
        phases = _phases
        if full:
            # use a native str attribute name: clearfilecache() matches
            # it against vars(repo), whose keys are str on Python 3 — a
            # bytes name would never match, and --full would silently
            # measure a warm cache instead of a cold one
            clearfilecache(repo, '_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()
893 893
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = (path.branch, opts.get(b'branch') or [])
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    # items() rather than the py2-only iteritems(): remotephases is a
    # plain dict and dict.iteritems() does not exist on Python 3
    for nhex, phase in remotephases.items():
        if nhex == b'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
951 951
@command(b'perfmanifest',[
         (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
         (b'', b'clear-disk', False, b'clear on-disk caches too'),
        ] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset; derive its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full 40-char hex node
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # getstorage() appeared in newer Mercurial; fall back to the
                # private _revlog attribute on older versions
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        # clear caches each run so the read is measured cold
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
987 987
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changeset entry from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def runone():
        repo.changelog.read(node)

    timer(runone)
    fm.end()
998 998
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # drop the cached ignore matcher so each run rebuilds it from scratch
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        # property access triggers (re)parsing of the hgignore files
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1015 1015
@command(b'perfindex', [
         (b'', b'rev', b'', b'revision to be looked up (default tip)'),
        ] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a node lookup"""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    # NOTE(review): the option default is b'', not None, so this branch looks
    # unreachable via the command line — confirm intended behavior
    if opts[b'rev'] is None:
        n = repo[b"tip"].node()
    else:
        rev = scmutil.revsingle(repo, opts[b'rev'])
        n = repo[rev].node()

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        cl = makecl(unfi)
        cl.rev(n)
    timer(d, setup=setup)
    fm.end()
1042 1042
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup time of this hg executable"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        # re-invoke the same interpreter/script (sys.argv[0]) with an empty
        # HGRCPATH so config loading does not skew the measurement
        if os.name != r'nt':
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
        else:
            # Windows: no inline env-var syntax, so set it in the environment
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
    timer(d)
    fm.end()
1056 1056
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark resolving the parents of many changelog nodes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodes = [repo.changelog.node(i) for i in _xrange(count)]

    def runone():
        for node in nodes:
            repo.changelog.parents(node)

    timer(runone)
    fm.end()
1073 1073
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the files list of a changectx"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)

    def runone():
        len(repo[rev].files())

    timer(runone)
    fm.end()
1083 1083
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the files list straight from changelog data"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def runone():
        # element 3 of a parsed changelog entry is the list of touched files
        len(cl.read(rev)[3])

    timer(runone)
    fm.end()
1094 1094
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def runone():
        return len(repo.lookup(rev))

    timer(runone)
    fm.end()
1101 1101
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
         ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark applying a long series of random edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed so every run (and every host) replays the same edit stream
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    # pre-generate the edits so only linelog work is inside the timed loop
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        # track the file length implied by replacing [a1, a2) with [b1, b2)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1135 1135
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def runone():
        return len(scmutil.revrange(repo, specs))

    timer(runone)
    fm.end()
1143 1143
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark looking up a node in a freshly-opened changelog revlog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    # open the changelog revlog directly, bypassing repo-level caching
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(n)
        # clear after the lookup so the next run starts cold
        clearcaches(cl)
    timer(d)
    fm.end()
1157 1157
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark the full `hg log` command, optionally following renames"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev=[]
    timer, fm = gettimer(ui, opts)
    # buffer ui output so printing does not dominate the measurement
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
                               copies=opts.get(b'rename')))
    ui.popbuffer()
    fm.end()
1171 1171
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            # branch() forces the changelog entry to be read, not just the index
            repo[rev].branch()

    timer(moonwalk)
    fm.end()
1186 1186
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    # makelogtemplater is resolved at import time; None means this Mercurial
    # is too old to provide it
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render into a devnull-backed ui so output cost is I/O-free
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1220 1220
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
         ])
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # merges are where copy tracing is actually exercised
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        # evaluate each (ancestor base -> merge parent) pair separately
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                # NOTE(review): this dict mixes bytes keys (above) with str
                # keys ('time', 'nbrenamedfiles', 'source', 'destination'
                # below) — verify fm.data(**data)/output %-formatting on
                # Python 3
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
1296 1296
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def runone():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(runone)
    fm.end()
1303 1303
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    def runone():
        store.fncache._load()

    timer(runone)
    fm.end()
1313 1313
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache file"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    # writing requires the store lock and a transaction; keep them for the
    # whole benchmark and back up the file so the repo is left untouched
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')
    def d():
        # force the dirty flag each run, otherwise write() would be a no-op
        s.fncache._dirty = True
        s.fncache.write(tr)
    timer(d)
    tr.close()
    lock.release()
    fm.end()
1330 1330
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every entry currently in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    # load once up front; only the encoding is timed
    store.fncache._load()

    def runone():
        for entry in store.fncache.entries:
            store.encode(entry)

    timer(runone)
    fm.end()
1342 1342
def _bdiffworker(q, blocks, xdiff, ready, done):
    """Worker loop for threaded perfbdiff runs.

    Pulls text pairs off queue ``q`` and diffs them until a ``None``
    sentinel is received, then parks on the ``ready`` condition until the
    main thread wakes it for the next timed run (or sets ``done``).
    """
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            # pick the same diff flavor the non-threaded path uses
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()
1358 1358
def _manifestrevision(repo, mnode):
    """Return the raw stored revision text for manifest node ``mnode``."""
    ml = repo.manifestlog
    # newer Mercurial exposes getstorage(); older versions only have the
    # private _revlog attribute
    if util.safehasattr(ml, b'getstorage'):
        return ml.getstorage(b'').revision(mnode)
    return ml._revlog.revision(mnode)
1368 1368
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    # for changelog/manifest mode the positional FILE argument is really REV
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # collect the (old, new) text pairs outside the timed function
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        # threaded mode: workers consume pairs from a queue; a None sentinel
        # per thread marks the end of each timed run (see _bdiffworker)
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    # shut the worker threads down cleanly
    if withthreads:
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1469 1469
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    # for changelog/manifest mode the positional FILE argument is really REV
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # collect the (left, right) text pairs outside the timed function
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1535 1535
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # map one-letter flags to the diff option they enable
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    # time the diff under each whitespace-option combination separately
    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = dict((options[c], b'1') for c in diffopt)
        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()
        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()
1557 1557
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # the first 4 bytes hold flags (high 16 bits) and version (low 16 bits)
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    # sample nodes at fixed points to measure lookup cost across the index
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1675 1675
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # negative start revision counts from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        # start each run cold
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in _xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1717 1717
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          (b'', b'count', 3, b'last revision to write'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
         ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    # opts went through _byteskwargs, so every key is bytes: the former
    # str-keyed lookups here raised KeyError on Python 3
    lazydeltabase = opts[b'lazydeltabase']
    source = opts[b'source']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort(b'invalid source type: %s' % source)

    ### actually gather results
    count = opts[b'count']
    if count <= 0:
        raise error.Abort(b'invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase)
        allresults.append(timing)

    ### consolidate the results in a single list
    # transpose per-run [(rev, time)] lists into [(rev, [time, ...])]
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts[b'details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # was `* 70 // 100`, which mislabeled the 70th percentile as "50%"
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
1819 1819
1820 1820 class _faketr(object):
1821 1821 def add(s, x, y, z=None):
1822 1822 return None
1823 1823
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True):
    """Re-add revisions [startrev, stoprev] of `orig` into a temporary
    revlog copy, timing each ``addrawrevision`` call.

    ``source`` selects the payload handed to the new revlog (fulltext or
    one of several delta bases, see ``_getrevisionseed``).  ``runidx`` is
    only used to label the progress topic when several runs are made.

    Returns a list of ``(rev, timing)`` tuples.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        # ui.progress() expects bytes for topic and unit on Python 3;
        # use b'' literals like the rest of this file does
        topic = b'adding'
        if runidx is not None:
            topic += b' (run #%d)' % runidx
        for idx, rev in enumerate(revs):
            ui.progress(topic, idx, unit=b'revs', total=total)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        # report completion, then clear the progress bar
        ui.progress(topic, total, unit=b'revs', total=total)
        ui.progress(topic, None, unit=b'revs', total=total)
    return timings
1844 1844
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) pair to feed ``addrawrevision()`` for
    revision ``rev`` of revlog ``orig``.

    ``source`` selects what payload is supplied:
    - b'full': the fulltext, no cached delta
    - b'parent-1' / b'parent-2': a delta against that parent
      (parent-2 falls back to parent-1 for non-merge revisions)
    - b'parent-smallest': a delta against whichever parent gives the
      smaller diff
    - b'storage': the delta actually stored in the original revlog
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        parent = p2
        if p2 == nullid:
            # not a merge: fall back to the first parent
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            # pick whichever parent produces the smaller delta
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    # kwargs keys stay native str so they can be splatted as **kwargs
    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1883 1883
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Context manager yielding a writable copy of revlog ``orig``,
    truncated so every revision >= ``truncaterev`` is missing and can be
    re-added.

    The copy lives in a temporary directory that is removed on exit.
    Inline revlogs are not supported (index and data must be separate
    files for the truncation below to work).
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        # mode 'ab' keeps the existing bytes; truncate() then chops the
        # file to the wanted size without rewriting its content
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
1930 1930
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # default: every available engine whose revlog compressor works
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # raw file handle on the store file backing this revlog
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    # one-element list so dochunkbatch can stash decompressed chunks for
    # the compression benchmarks below
    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    # compression benches depend on chunks[0] filled by 'chunk batch' above
    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2048 2048
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the positional argument is the revision, not a file
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # slice compressed segments back into one raw chunk per revision
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        # NOTE: iterates `slicedchain` from the enclosing scope, not the
        # `chain` argument (slicedchain is defined below, before first call)
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved between modules over time; fall back for old hg
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2184 2184
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if need to evaluate the impact of build volatile
    revisions set cache on the revset execution. Volatile cache hold filtered
    and obsolete related cache."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            # build a changectx per revision (measures context creation too)
            for ctx in repo.set(expr): pass
        else:
            # iterate bare revision numbers only
            for r in repo.revs(expr): pass
    timer(d)
    fm.end()
2207 2207
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def makebench(func, setname):
        # Benchmark body: drop the volatile sets (and optionally the
        # obsstore file cache) so each run recomputes from scratch.
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            func(repo, setname)
        return d

    def wanted(candidates):
        # Sorted candidates, restricted to names given on the command line.
        selected = sorted(candidates)
        if names:
            selected = [n for n in selected if n in names]
        return selected

    # obsolescence-related sets first, then repoview filter sets
    for setname in wanted(obsolete.cachefuncs):
        timer(makebench(obsolete.getrevs, setname), title=setname)

    for setname in wanted(repoview.filtertable):
        timer(makebench(repoview.filterrevs, setname), title=setname)
    fm.end()
2249 2249
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
         ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap: measures a full rebuild
                view._branchcaches.clear()
            else:
                # drop only this view's map: measures update from subset
                view._branchcaches.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset is not itself still pending, so
        # allfilters ends up ordered smaller-subset-first
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap read/write so only computation is timed
    branchcacheread = safeattrsetter(branchmap, b'read')
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcacheread.set(lambda repo: None)
    branchcachewrite.set(lambda bc, repo: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2318 2318
@command(b'perfbranchmapupdate', [
    (b'', b'base', [], b'subset of revision to start from'),
    (b'', b'target', [], b'subset of revision to end with'),
    (b'', b'clear-caches', False, b'clear cache between each runs')
    ] + formatteropts)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

       # update for the one last revision
       $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

       # update for change coming with a new branch
       $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None] # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # revisions the timed update will have to process
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # register temporary repoview filters exposing exactly the base
        # and target revision sets
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset where found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # start each run from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2422 2422
@command(b'perfbranchmapload', [
    (b'f', b'filter', b'', b'Specify repoview filter'),
    (b'', b'list', False, b'List branchmap filter caches'),
    (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),

    ] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # only list the on-disk branchmap cache files, do not benchmark
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap() # make sure we have a relevant, up to date branchmap

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while branchmap.read(repo) is None:
        # walk up the subset chain until we find a cached branchmap
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        branchmap.read(repo)
    timer(bench, setup=setup)
    fm.end()
2467 2467
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)
    def d():
        # len() forces the obsstore to actually read and parse the markers
        return len(obsolete.obsstore(svfs))
    timer(d)
    fm.end()
2477 2477
@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
     (b'', b'mincost', 0, b'smallest cost of items in cache'),
     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
     (b'', b'size', 4, b'size of cache'),
     (b'', b'gets', 10000, b'number of key lookups'),
     (b'', b'sets', 10000, b'number of key sets'),
     (b'', b'mixed', 10000, b'number of mixed mode operations'),
     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    """benchmark util.lrucachedict operations

    Measures init, get, insert/set and mixed workloads; with a non-zero
    --costlimit the cost-aware variants are benchmarked instead.
    """
    opts = _byteskwargs(opts)

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        # `costs` is assigned below; it is bound by the time this runs
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2608 2608
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    line = b'Testing write performance\n'
    def write():
        # emit the same line many times to isolate ui.write() overhead
        for _idx in range(100000):
            ui.write(line)
    timer(write)
    fm.end()
2621 2621
def uisetup(ui):
    """Extension setup: wrap cmdutil.openrevlog on historical Mercurials.

    NOTE(review): attribute names are passed as bytes to safehasattr and
    wrapfunction here; presumably fine for the Python 2 era versions this
    guard targets, but getattr() needs native str on Python 3 — confirm.
    """
    if (util.safehasattr(cmdutil, b'openrevlog') and
        not util.safehasattr(commands, b'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(b"This version doesn't support --dir option",
                                  hint=b"use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2636 2636
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def doprogress():
        # drive one progress bar from 0 to `total`, one increment at a time
        with ui.makeprogress(topic, total=total) as progress:
            for _step in pycompat.xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now