perf: rename version flag of perfchangegroupchangelog to cgversion...
Author: Pulkit Goyal
Changeset: r40749:cfaf3843 (default branch)
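
This revision renames the command-specific --version flag of perfchangegroupchangelog to --cgversion and renames the matching keyword argument, presumably to keep the per-command flag from colliding with the global --version flag (the commit message above is truncated, so the rationale is inferred). As a minimal sketch, assuming only the standard layout of Mercurial option tuples, the following illustrative Python shows why the option tuple and the function signature change together; the stub body, the assert, and the sample invocation are not part of the changeset.

# Sketch only (not part of this changeset): how an option tuple maps to a flag.
# A Mercurial option tuple is (short flag, long flag, default, help text); the
# long flag name is also the keyword under which the parsed value reaches the
# command function.  Renaming b'version' to b'cgversion' therefore renames both
# the flag the user types and the function parameter.
options = [
    (b'', b'cgversion', b'02', b'changegroup version'),       # --cgversion VALUE
    (b'r', b'rev', b'', b'revisions to add to changegroup'),  # -r/--rev VALUE
]

def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Stub standing in for the real command; only the signature matters here."""
    return cgversion, rev

# After the rename the command is invoked as, e.g.:
#   hg perfchangegroupchangelog --cgversion 02 --rev 'all()'
# and dispatch ends up calling something equivalent to:
assert perfchangegroupchangelog(None, None, cgversion=b'02', rev=b'all()') == (b'02', b'all()')
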
@@ -1,2434 +1,2434 @@
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance'''
3 3
4 4 # "historical portability" policy of perf.py:
5 5 #
6 6 # We have to do:
7 7 # - make perf.py "loadable" with as wide Mercurial version as possible
8 8 # This doesn't mean that perf commands work correctly with that Mercurial.
9 9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
10 10 # - make historical perf command work correctly with as wide Mercurial
11 11 # version as possible
12 12 #
13 13 # We have to do, if possible with reasonable cost:
14 14 # - make recent perf command for historical feature work correctly
15 15 # with early Mercurial
16 16 #
17 17 # We don't have to do:
18 18 # - make perf command for recent feature work correctly with early
19 19 # Mercurial
20 20
21 21 from __future__ import absolute_import
22 22 import contextlib
23 23 import functools
24 24 import gc
25 25 import os
26 26 import random
27 27 import shutil
28 28 import struct
29 29 import sys
30 30 import tempfile
31 31 import threading
32 32 import time
33 33 from mercurial import (
34 34 changegroup,
35 35 cmdutil,
36 36 commands,
37 37 copies,
38 38 error,
39 39 extensions,
40 40 mdiff,
41 41 merge,
42 42 revlog,
43 43 util,
44 44 )
45 45
46 46 # for "historical portability":
47 47 # try to import modules separately (in dict order), and ignore
48 48 # failure, because these aren't available with early Mercurial
49 49 try:
50 50 from mercurial import branchmap # since 2.5 (or bcee63733aad)
51 51 except ImportError:
52 52 pass
53 53 try:
54 54 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
55 55 except ImportError:
56 56 pass
57 57 try:
58 58 from mercurial import registrar # since 3.7 (or 37d50250b696)
59 59 dir(registrar) # forcibly load it
60 60 except ImportError:
61 61 registrar = None
62 62 try:
63 63 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
64 64 except ImportError:
65 65 pass
66 66 try:
67 67 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
68 68 except ImportError:
69 69 pass
70 70
71 71 def identity(a):
72 72 return a
73 73
74 74 try:
75 75 from mercurial import pycompat
76 76 getargspec = pycompat.getargspec # added to module after 4.5
77 77 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
78 78 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
79 79 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
80 80 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
81 81 if pycompat.ispy3:
82 82 _maxint = sys.maxsize # per py3 docs for replacing maxint
83 83 else:
84 84 _maxint = sys.maxint
85 85 except (ImportError, AttributeError):
86 86 import inspect
87 87 getargspec = inspect.getargspec
88 88 _byteskwargs = identity
89 89 fsencode = identity # no py3 support
90 90 _maxint = sys.maxint # no py3 support
91 91 _sysstr = lambda x: x # no py3 support
92 92 _xrange = xrange
93 93
94 94 try:
95 95 # 4.7+
96 96 queue = pycompat.queue.Queue
97 97 except (AttributeError, ImportError):
98 98 # <4.7.
99 99 try:
100 100 queue = pycompat.queue
101 101 except (AttributeError, ImportError):
102 102 queue = util.queue
103 103
104 104 try:
105 105 from mercurial import logcmdutil
106 106 makelogtemplater = logcmdutil.maketemplater
107 107 except (AttributeError, ImportError):
108 108 try:
109 109 makelogtemplater = cmdutil.makelogtemplater
110 110 except (AttributeError, ImportError):
111 111 makelogtemplater = None
112 112
113 113 # for "historical portability":
114 114 # define util.safehasattr forcibly, because util.safehasattr has been
115 115 # available since 1.9.3 (or 94b200a11cf7)
116 116 _undefined = object()
117 117 def safehasattr(thing, attr):
118 118 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
119 119 setattr(util, 'safehasattr', safehasattr)
120 120
121 121 # for "historical portability":
122 122 # define util.timer forcibly, because util.timer has been available
123 123 # since ae5d60bb70c9
124 124 if safehasattr(time, 'perf_counter'):
125 125 util.timer = time.perf_counter
126 126 elif os.name == b'nt':
127 127 util.timer = time.clock
128 128 else:
129 129 util.timer = time.time
130 130
131 131 # for "historical portability":
132 132 # use locally defined empty option list, if formatteropts isn't
133 133 # available, because commands.formatteropts has been available since
134 134 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
135 135 # available since 2.2 (or ae5f92e154d3)
136 136 formatteropts = getattr(cmdutil, "formatteropts",
137 137 getattr(commands, "formatteropts", []))
138 138
139 139 # for "historical portability":
140 140 # use locally defined option list, if debugrevlogopts isn't available,
141 141 # because commands.debugrevlogopts has been available since 3.7 (or
142 142 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
143 143 # since 1.9 (or a79fea6b3e77).
144 144 revlogopts = getattr(cmdutil, "debugrevlogopts",
145 145 getattr(commands, "debugrevlogopts", [
146 146 (b'c', b'changelog', False, (b'open changelog')),
147 147 (b'm', b'manifest', False, (b'open manifest')),
148 148 (b'', b'dir', False, (b'open directory manifest')),
149 149 ]))
150 150
151 151 cmdtable = {}
152 152
153 153 # for "historical portability":
154 154 # define parsealiases locally, because cmdutil.parsealiases has been
155 155 # available since 1.5 (or 6252852b4332)
156 156 def parsealiases(cmd):
157 157 return cmd.split(b"|")
158 158
159 159 if safehasattr(registrar, 'command'):
160 160 command = registrar.command(cmdtable)
161 161 elif safehasattr(cmdutil, 'command'):
162 162 command = cmdutil.command(cmdtable)
163 163 if b'norepo' not in getargspec(command).args:
164 164 # for "historical portability":
165 165 # wrap original cmdutil.command, because "norepo" option has
166 166 # been available since 3.1 (or 75a96326cecb)
167 167 _command = command
168 168 def command(name, options=(), synopsis=None, norepo=False):
169 169 if norepo:
170 170 commands.norepo += b' %s' % b' '.join(parsealiases(name))
171 171 return _command(name, list(options), synopsis)
172 172 else:
173 173 # for "historical portability":
174 174 # define "@command" annotation locally, because cmdutil.command
175 175 # has been available since 1.9 (or 2daa5179e73f)
176 176 def command(name, options=(), synopsis=None, norepo=False):
177 177 def decorator(func):
178 178 if synopsis:
179 179 cmdtable[name] = func, list(options), synopsis
180 180 else:
181 181 cmdtable[name] = func, list(options)
182 182 if norepo:
183 183 commands.norepo += b' %s' % b' '.join(parsealiases(name))
184 184 return func
185 185 return decorator
186 186
187 187 try:
188 188 import mercurial.registrar
189 189 import mercurial.configitems
190 190 configtable = {}
191 191 configitem = mercurial.registrar.configitem(configtable)
192 192 configitem(b'perf', b'presleep',
193 193 default=mercurial.configitems.dynamicdefault,
194 194 )
195 195 configitem(b'perf', b'stub',
196 196 default=mercurial.configitems.dynamicdefault,
197 197 )
198 198 configitem(b'perf', b'parentscount',
199 199 default=mercurial.configitems.dynamicdefault,
200 200 )
201 201 configitem(b'perf', b'all-timing',
202 202 default=mercurial.configitems.dynamicdefault,
203 203 )
204 204 except (ImportError, AttributeError):
205 205 pass
206 206
207 207 def getlen(ui):
208 208 if ui.configbool(b"perf", b"stub", False):
209 209 return lambda x: 1
210 210 return len
211 211
212 212 def gettimer(ui, opts=None):
213 213 """return a timer function and formatter: (timer, formatter)
214 214
215 215 This function exists to gather the creation of formatter in a single
216 216 place instead of duplicating it in all performance commands."""
217 217
218 218 # enforce an idle period before execution to counteract power management
219 219 # experimental config: perf.presleep
220 220 time.sleep(getint(ui, b"perf", b"presleep", 1))
221 221
222 222 if opts is None:
223 223 opts = {}
224 224 # redirect all to stderr unless buffer api is in use
225 225 if not ui._buffers:
226 226 ui = ui.copy()
227 227 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
228 228 if uifout:
229 229 # for "historical portability":
230 230 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
231 231 uifout.set(ui.ferr)
232 232
233 233 # get a formatter
234 234 uiformatter = getattr(ui, 'formatter', None)
235 235 if uiformatter:
236 236 fm = uiformatter(b'perf', opts)
237 237 else:
238 238 # for "historical portability":
239 239 # define formatter locally, because ui.formatter has been
240 240 # available since 2.2 (or ae5f92e154d3)
241 241 from mercurial import node
242 242 class defaultformatter(object):
243 243 """Minimized composition of baseformatter and plainformatter
244 244 """
245 245 def __init__(self, ui, topic, opts):
246 246 self._ui = ui
247 247 if ui.debugflag:
248 248 self.hexfunc = node.hex
249 249 else:
250 250 self.hexfunc = node.short
251 251 def __nonzero__(self):
252 252 return False
253 253 __bool__ = __nonzero__
254 254 def startitem(self):
255 255 pass
256 256 def data(self, **data):
257 257 pass
258 258 def write(self, fields, deftext, *fielddata, **opts):
259 259 self._ui.write(deftext % fielddata, **opts)
260 260 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
261 261 if cond:
262 262 self._ui.write(deftext % fielddata, **opts)
263 263 def plain(self, text, **opts):
264 264 self._ui.write(text, **opts)
265 265 def end(self):
266 266 pass
267 267 fm = defaultformatter(ui, b'perf', opts)
268 268
269 269 # stub function, runs code only once instead of in a loop
270 270 # experimental config: perf.stub
271 271 if ui.configbool(b"perf", b"stub", False):
272 272 return functools.partial(stub_timer, fm), fm
273 273
274 274 # experimental config: perf.all-timing
275 275 displayall = ui.configbool(b"perf", b"all-timing", False)
276 276 return functools.partial(_timer, fm, displayall=displayall), fm
277 277
278 278 def stub_timer(fm, func, setup=None, title=None):
279 279 func()
280 280
281 281 @contextlib.contextmanager
282 282 def timeone():
283 283 r = []
284 284 ostart = os.times()
285 285 cstart = util.timer()
286 286 yield r
287 287 cstop = util.timer()
288 288 ostop = os.times()
289 289 a, b = ostart, ostop
290 290 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
291 291
292 292 def _timer(fm, func, setup=None, title=None, displayall=False):
293 293 gc.collect()
294 294 results = []
295 295 begin = util.timer()
296 296 count = 0
297 297 while True:
298 298 if setup is not None:
299 299 setup()
300 300 with timeone() as item:
301 301 r = func()
302 302 count += 1
303 303 results.append(item[0])
304 304 cstop = util.timer()
305 305 if cstop - begin > 3 and count >= 100:
306 306 break
307 307 if cstop - begin > 10 and count >= 3:
308 308 break
309 309
310 310 formatone(fm, results, title=title, result=r,
311 311 displayall=displayall)
312 312
313 313 def formatone(fm, timings, title=None, result=None, displayall=False):
314 314
315 315 count = len(timings)
316 316
317 317 fm.startitem()
318 318
319 319 if title:
320 320 fm.write(b'title', b'! %s\n', title)
321 321 if result:
322 322 fm.write(b'result', b'! result: %s\n', result)
323 323 def display(role, entry):
324 324 prefix = b''
325 325 if role != b'best':
326 326 prefix = b'%s.' % role
327 327 fm.plain(b'!')
328 328 fm.write(prefix + b'wall', b' wall %f', entry[0])
329 329 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
330 330 fm.write(prefix + b'user', b' user %f', entry[1])
331 331 fm.write(prefix + b'sys', b' sys %f', entry[2])
332 332 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
333 333 fm.plain(b'\n')
334 334 timings.sort()
335 335 min_val = timings[0]
336 336 display(b'best', min_val)
337 337 if displayall:
338 338 max_val = timings[-1]
339 339 display(b'max', max_val)
340 340 avg = tuple([sum(x) / count for x in zip(*timings)])
341 341 display(b'avg', avg)
342 342 median = timings[len(timings) // 2]
343 343 display(b'median', median)
344 344
345 345 # utilities for historical portability
346 346
347 347 def getint(ui, section, name, default):
348 348 # for "historical portability":
349 349 # ui.configint has been available since 1.9 (or fa2b596db182)
350 350 v = ui.config(section, name, None)
351 351 if v is None:
352 352 return default
353 353 try:
354 354 return int(v)
355 355 except ValueError:
356 356 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
357 357 % (section, name, v))
358 358
359 359 def safeattrsetter(obj, name, ignoremissing=False):
360 360 """Ensure that 'obj' has 'name' attribute before subsequent setattr
361 361
362 362 This function is aborted, if 'obj' doesn't have 'name' attribute
363 363 at runtime. This avoids overlooking removal of an attribute, which
364 364 breaks assumption of performance measurement, in the future.
365 365
366 366 This function returns the object to (1) assign a new value, and
367 367 (2) restore an original value to the attribute.
368 368
369 369 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
370 370 abortion, and this function returns None. This is useful to
371 371 examine an attribute, which isn't ensured in all Mercurial
372 372 versions.
373 373 """
374 374 if not util.safehasattr(obj, name):
375 375 if ignoremissing:
376 376 return None
377 377 raise error.Abort((b"missing attribute %s of %s might break assumption"
378 378 b" of performance measurement") % (name, obj))
379 379
380 380 origvalue = getattr(obj, _sysstr(name))
381 381 class attrutil(object):
382 382 def set(self, newvalue):
383 383 setattr(obj, _sysstr(name), newvalue)
384 384 def restore(self):
385 385 setattr(obj, _sysstr(name), origvalue)
386 386
387 387 return attrutil()
388 388
389 389 # utilities to examine each internal API changes
390 390
391 391 def getbranchmapsubsettable():
392 392 # for "historical portability":
393 393 # subsettable is defined in:
394 394 # - branchmap since 2.9 (or 175c6fd8cacc)
395 395 # - repoview since 2.5 (or 59a9f18d4587)
396 396 for mod in (branchmap, repoview):
397 397 subsettable = getattr(mod, 'subsettable', None)
398 398 if subsettable:
399 399 return subsettable
400 400
401 401 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
402 402 # branchmap and repoview modules exist, but subsettable attribute
403 403 # doesn't)
404 404 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
405 405 hint=b"use 2.5 or later")
406 406
407 407 def getsvfs(repo):
408 408 """Return appropriate object to access files under .hg/store
409 409 """
410 410 # for "historical portability":
411 411 # repo.svfs has been available since 2.3 (or 7034365089bf)
412 412 svfs = getattr(repo, 'svfs', None)
413 413 if svfs:
414 414 return svfs
415 415 else:
416 416 return getattr(repo, 'sopener')
417 417
418 418 def getvfs(repo):
419 419 """Return appropriate object to access files under .hg
420 420 """
421 421 # for "historical portability":
422 422 # repo.vfs has been available since 2.3 (or 7034365089bf)
423 423 vfs = getattr(repo, 'vfs', None)
424 424 if vfs:
425 425 return vfs
426 426 else:
427 427 return getattr(repo, 'opener')
428 428
429 429 def repocleartagscachefunc(repo):
430 430 """Return the function to clear tags cache according to repo internal API
431 431 """
432 432 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
433 433 # in this case, setattr(repo, '_tagscache', None) or so isn't
434 434 # correct way to clear tags cache, because existing code paths
435 435 # expect _tagscache to be a structured object.
436 436 def clearcache():
437 437 # _tagscache has been filteredpropertycache since 2.5 (or
438 438 # 98c867ac1330), and delattr() can't work in such case
439 439 if b'_tagscache' in vars(repo):
440 440 del repo.__dict__[b'_tagscache']
441 441 return clearcache
442 442
443 443 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
444 444 if repotags: # since 1.4 (or 5614a628d173)
445 445 return lambda : repotags.set(None)
446 446
447 447 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
448 448 if repotagscache: # since 0.6 (or d7df759d0e97)
449 449 return lambda : repotagscache.set(None)
450 450
451 451 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
452 452 # this point, but it isn't so problematic, because:
453 453 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
454 454 # in perftags() causes failure soon
455 455 # - perf.py itself has been available since 1.1 (or eb240755386d)
456 456 raise error.Abort((b"tags API of this hg command is unknown"))
457 457
458 458 # utilities to clear cache
459 459
460 460 def clearfilecache(obj, attrname):
461 461 unfiltered = getattr(obj, 'unfiltered', None)
462 462 if unfiltered is not None:
463 463 obj = obj.unfiltered()
464 464 if attrname in vars(obj):
465 465 delattr(obj, attrname)
466 466 obj._filecache.pop(attrname, None)
467 467
468 468 def clearchangelog(repo):
469 469 if repo is not repo.unfiltered():
470 470 object.__setattr__(repo, r'_clcachekey', None)
471 471 object.__setattr__(repo, r'_clcache', None)
472 472 clearfilecache(repo.unfiltered(), 'changelog')
473 473
474 474 # perf commands
475 475
476 476 @command(b'perfwalk', formatteropts)
477 477 def perfwalk(ui, repo, *pats, **opts):
478 478 opts = _byteskwargs(opts)
479 479 timer, fm = gettimer(ui, opts)
480 480 m = scmutil.match(repo[None], pats, {})
481 481 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
482 482 ignored=False))))
483 483 fm.end()
484 484
485 485 @command(b'perfannotate', formatteropts)
486 486 def perfannotate(ui, repo, f, **opts):
487 487 opts = _byteskwargs(opts)
488 488 timer, fm = gettimer(ui, opts)
489 489 fc = repo[b'.'][f]
490 490 timer(lambda: len(fc.annotate(True)))
491 491 fm.end()
492 492
493 493 @command(b'perfstatus',
494 494 [(b'u', b'unknown', False,
495 495 b'ask status to look for unknown files')] + formatteropts)
496 496 def perfstatus(ui, repo, **opts):
497 497 opts = _byteskwargs(opts)
498 498 #m = match.always(repo.root, repo.getcwd())
499 499 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
500 500 # False))))
501 501 timer, fm = gettimer(ui, opts)
502 502 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
503 503 fm.end()
504 504
505 505 @command(b'perfaddremove', formatteropts)
506 506 def perfaddremove(ui, repo, **opts):
507 507 opts = _byteskwargs(opts)
508 508 timer, fm = gettimer(ui, opts)
509 509 try:
510 510 oldquiet = repo.ui.quiet
511 511 repo.ui.quiet = True
512 512 matcher = scmutil.match(repo[None])
513 513 opts[b'dry_run'] = True
514 514 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
515 515 finally:
516 516 repo.ui.quiet = oldquiet
517 517 fm.end()
518 518
519 519 def clearcaches(cl):
520 520 # behave somewhat consistently across internal API changes
521 521 if util.safehasattr(cl, b'clearcaches'):
522 522 cl.clearcaches()
523 523 elif util.safehasattr(cl, b'_nodecache'):
524 524 from mercurial.node import nullid, nullrev
525 525 cl._nodecache = {nullid: nullrev}
526 526 cl._nodepos = None
527 527
528 528 @command(b'perfheads', formatteropts)
529 529 def perfheads(ui, repo, **opts):
530 530 opts = _byteskwargs(opts)
531 531 timer, fm = gettimer(ui, opts)
532 532 cl = repo.changelog
533 533 def d():
534 534 len(cl.headrevs())
535 535 clearcaches(cl)
536 536 timer(d)
537 537 fm.end()
538 538
539 539 @command(b'perftags', formatteropts)
540 540 def perftags(ui, repo, **opts):
541 541 import mercurial.changelog
542 542 import mercurial.manifest
543 543
544 544 opts = _byteskwargs(opts)
545 545 timer, fm = gettimer(ui, opts)
546 546 svfs = getsvfs(repo)
547 547 repocleartagscache = repocleartagscachefunc(repo)
548 548 def s():
549 549 repo.changelog = mercurial.changelog.changelog(svfs)
550 550 rootmanifest = mercurial.manifest.manifestrevlog(svfs)
551 551 repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo,
552 552 rootmanifest)
553 553 repocleartagscache()
554 554 def t():
555 555 return len(repo.tags())
556 556 timer(t, setup=s)
557 557 fm.end()
558 558
559 559 @command(b'perfancestors', formatteropts)
560 560 def perfancestors(ui, repo, **opts):
561 561 opts = _byteskwargs(opts)
562 562 timer, fm = gettimer(ui, opts)
563 563 heads = repo.changelog.headrevs()
564 564 def d():
565 565 for a in repo.changelog.ancestors(heads):
566 566 pass
567 567 timer(d)
568 568 fm.end()
569 569
570 570 @command(b'perfancestorset', formatteropts)
571 571 def perfancestorset(ui, repo, revset, **opts):
572 572 opts = _byteskwargs(opts)
573 573 timer, fm = gettimer(ui, opts)
574 574 revs = repo.revs(revset)
575 575 heads = repo.changelog.headrevs()
576 576 def d():
577 577 s = repo.changelog.ancestors(heads)
578 578 for rev in revs:
579 579 rev in s
580 580 timer(d)
581 581 fm.end()
582 582
583 583 @command(b'perfbookmarks', formatteropts)
584 584 def perfbookmarks(ui, repo, **opts):
585 585 """benchmark parsing bookmarks from disk to memory"""
586 586 opts = _byteskwargs(opts)
587 587 timer, fm = gettimer(ui, opts)
588 588
589 589 def s():
590 590 clearfilecache(repo, b'_bookmarks')
591 591 def d():
592 592 repo._bookmarks
593 593 timer(d, setup=s)
594 594 fm.end()
595 595
596 596 @command(b'perfbundleread', formatteropts, b'BUNDLE')
597 597 def perfbundleread(ui, repo, bundlepath, **opts):
598 598 """Benchmark reading of bundle files.
599 599
600 600 This command is meant to isolate the I/O part of bundle reading as
601 601 much as possible.
602 602 """
603 603 from mercurial import (
604 604 bundle2,
605 605 exchange,
606 606 streamclone,
607 607 )
608 608
609 609 opts = _byteskwargs(opts)
610 610
611 611 def makebench(fn):
612 612 def run():
613 613 with open(bundlepath, b'rb') as fh:
614 614 bundle = exchange.readbundle(ui, fh, bundlepath)
615 615 fn(bundle)
616 616
617 617 return run
618 618
619 619 def makereadnbytes(size):
620 620 def run():
621 621 with open(bundlepath, b'rb') as fh:
622 622 bundle = exchange.readbundle(ui, fh, bundlepath)
623 623 while bundle.read(size):
624 624 pass
625 625
626 626 return run
627 627
628 628 def makestdioread(size):
629 629 def run():
630 630 with open(bundlepath, b'rb') as fh:
631 631 while fh.read(size):
632 632 pass
633 633
634 634 return run
635 635
636 636 # bundle1
637 637
638 638 def deltaiter(bundle):
639 639 for delta in bundle.deltaiter():
640 640 pass
641 641
642 642 def iterchunks(bundle):
643 643 for chunk in bundle.getchunks():
644 644 pass
645 645
646 646 # bundle2
647 647
648 648 def forwardchunks(bundle):
649 649 for chunk in bundle._forwardchunks():
650 650 pass
651 651
652 652 def iterparts(bundle):
653 653 for part in bundle.iterparts():
654 654 pass
655 655
656 656 def iterpartsseekable(bundle):
657 657 for part in bundle.iterparts(seekable=True):
658 658 pass
659 659
660 660 def seek(bundle):
661 661 for part in bundle.iterparts(seekable=True):
662 662 part.seek(0, os.SEEK_END)
663 663
664 664 def makepartreadnbytes(size):
665 665 def run():
666 666 with open(bundlepath, b'rb') as fh:
667 667 bundle = exchange.readbundle(ui, fh, bundlepath)
668 668 for part in bundle.iterparts():
669 669 while part.read(size):
670 670 pass
671 671
672 672 return run
673 673
674 674 benches = [
675 675 (makestdioread(8192), b'read(8k)'),
676 676 (makestdioread(16384), b'read(16k)'),
677 677 (makestdioread(32768), b'read(32k)'),
678 678 (makestdioread(131072), b'read(128k)'),
679 679 ]
680 680
681 681 with open(bundlepath, b'rb') as fh:
682 682 bundle = exchange.readbundle(ui, fh, bundlepath)
683 683
684 684 if isinstance(bundle, changegroup.cg1unpacker):
685 685 benches.extend([
686 686 (makebench(deltaiter), b'cg1 deltaiter()'),
687 687 (makebench(iterchunks), b'cg1 getchunks()'),
688 688 (makereadnbytes(8192), b'cg1 read(8k)'),
689 689 (makereadnbytes(16384), b'cg1 read(16k)'),
690 690 (makereadnbytes(32768), b'cg1 read(32k)'),
691 691 (makereadnbytes(131072), b'cg1 read(128k)'),
692 692 ])
693 693 elif isinstance(bundle, bundle2.unbundle20):
694 694 benches.extend([
695 695 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
696 696 (makebench(iterparts), b'bundle2 iterparts()'),
697 697 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
698 698 (makebench(seek), b'bundle2 part seek()'),
699 699 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
700 700 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
701 701 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
702 702 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
703 703 ])
704 704 elif isinstance(bundle, streamclone.streamcloneapplier):
705 705 raise error.Abort(b'stream clone bundles not supported')
706 706 else:
707 707 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
708 708
709 709 for fn, title in benches:
710 710 timer, fm = gettimer(ui, opts)
711 711 timer(fn, title=title)
712 712 fm.end()
713 713
714 714 @command(b'perfchangegroupchangelog', formatteropts +
715 - [(b'', b'version', b'02', b'changegroup version'),
715 + [(b'', b'cgversion', b'02', b'changegroup version'),
716 716 (b'r', b'rev', b'', b'revisions to add to changegroup')])
717 - def perfchangegroupchangelog(ui, repo, version=b'02', rev=None, **opts):
717 + def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
718 718 """Benchmark producing a changelog group for a changegroup.
719 719
720 720 This measures the time spent processing the changelog during a
721 721 bundle operation. This occurs during `hg bundle` and on a server
722 722 processing a `getbundle` wire protocol request (handles clones
723 723 and pull requests).
724 724
725 725 By default, all revisions are added to the changegroup.
726 726 """
727 727 opts = _byteskwargs(opts)
728 728 cl = repo.changelog
729 729 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
730 - bundler = changegroup.getbundler(version, repo)
730 + bundler = changegroup.getbundler(cgversion, repo)
731 731
732 732 def d():
733 733 state, chunks = bundler._generatechangelog(cl, nodes)
734 734 for chunk in chunks:
735 735 pass
736 736
737 737 timer, fm = gettimer(ui, opts)
738 738
739 739 # Terminal printing can interfere with timing. So disable it.
740 740 with ui.configoverride({(b'progress', b'disable'): True}):
741 741 timer(d)
742 742
743 743 fm.end()
744 744
745 745 @command(b'perfdirs', formatteropts)
746 746 def perfdirs(ui, repo, **opts):
747 747 opts = _byteskwargs(opts)
748 748 timer, fm = gettimer(ui, opts)
749 749 dirstate = repo.dirstate
750 750 b'a' in dirstate
751 751 def d():
752 752 dirstate.hasdir(b'a')
753 753 del dirstate._map._dirs
754 754 timer(d)
755 755 fm.end()
756 756
757 757 @command(b'perfdirstate', formatteropts)
758 758 def perfdirstate(ui, repo, **opts):
759 759 opts = _byteskwargs(opts)
760 760 timer, fm = gettimer(ui, opts)
761 761 b"a" in repo.dirstate
762 762 def d():
763 763 repo.dirstate.invalidate()
764 764 b"a" in repo.dirstate
765 765 timer(d)
766 766 fm.end()
767 767
768 768 @command(b'perfdirstatedirs', formatteropts)
769 769 def perfdirstatedirs(ui, repo, **opts):
770 770 opts = _byteskwargs(opts)
771 771 timer, fm = gettimer(ui, opts)
772 772 b"a" in repo.dirstate
773 773 def d():
774 774 repo.dirstate.hasdir(b"a")
775 775 del repo.dirstate._map._dirs
776 776 timer(d)
777 777 fm.end()
778 778
779 779 @command(b'perfdirstatefoldmap', formatteropts)
780 780 def perfdirstatefoldmap(ui, repo, **opts):
781 781 opts = _byteskwargs(opts)
782 782 timer, fm = gettimer(ui, opts)
783 783 dirstate = repo.dirstate
784 784 b'a' in dirstate
785 785 def d():
786 786 dirstate._map.filefoldmap.get(b'a')
787 787 del dirstate._map.filefoldmap
788 788 timer(d)
789 789 fm.end()
790 790
791 791 @command(b'perfdirfoldmap', formatteropts)
792 792 def perfdirfoldmap(ui, repo, **opts):
793 793 opts = _byteskwargs(opts)
794 794 timer, fm = gettimer(ui, opts)
795 795 dirstate = repo.dirstate
796 796 b'a' in dirstate
797 797 def d():
798 798 dirstate._map.dirfoldmap.get(b'a')
799 799 del dirstate._map.dirfoldmap
800 800 del dirstate._map._dirs
801 801 timer(d)
802 802 fm.end()
803 803
804 804 @command(b'perfdirstatewrite', formatteropts)
805 805 def perfdirstatewrite(ui, repo, **opts):
806 806 opts = _byteskwargs(opts)
807 807 timer, fm = gettimer(ui, opts)
808 808 ds = repo.dirstate
809 809 b"a" in ds
810 810 def d():
811 811 ds._dirty = True
812 812 ds.write(repo.currenttransaction())
813 813 timer(d)
814 814 fm.end()
815 815
816 816 @command(b'perfmergecalculate',
817 817 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
818 818 def perfmergecalculate(ui, repo, rev, **opts):
819 819 opts = _byteskwargs(opts)
820 820 timer, fm = gettimer(ui, opts)
821 821 wctx = repo[None]
822 822 rctx = scmutil.revsingle(repo, rev, rev)
823 823 ancestor = wctx.ancestor(rctx)
824 824 # we don't want working dir files to be stat'd in the benchmark, so prime
825 825 # that cache
826 826 wctx.dirty()
827 827 def d():
828 828 # acceptremote is True because we don't want prompts in the middle of
829 829 # our benchmark
830 830 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
831 831 acceptremote=True, followcopies=True)
832 832 timer(d)
833 833 fm.end()
834 834
835 835 @command(b'perfpathcopies', [], b"REV REV")
836 836 def perfpathcopies(ui, repo, rev1, rev2, **opts):
837 837 opts = _byteskwargs(opts)
838 838 timer, fm = gettimer(ui, opts)
839 839 ctx1 = scmutil.revsingle(repo, rev1, rev1)
840 840 ctx2 = scmutil.revsingle(repo, rev2, rev2)
841 841 def d():
842 842 copies.pathcopies(ctx1, ctx2)
843 843 timer(d)
844 844 fm.end()
845 845
846 846 @command(b'perfphases',
847 847 [(b'', b'full', False, b'include file reading time too'),
848 848 ], b"")
849 849 def perfphases(ui, repo, **opts):
850 850 """benchmark phasesets computation"""
851 851 opts = _byteskwargs(opts)
852 852 timer, fm = gettimer(ui, opts)
853 853 _phases = repo._phasecache
854 854 full = opts.get(b'full')
855 855 def d():
856 856 phases = _phases
857 857 if full:
858 858 clearfilecache(repo, b'_phasecache')
859 859 phases = repo._phasecache
860 860 phases.invalidate()
861 861 phases.loadphaserevs(repo)
862 862 timer(d)
863 863 fm.end()
864 864
865 865 @command(b'perfphasesremote',
866 866 [], b"[DEST]")
867 867 def perfphasesremote(ui, repo, dest=None, **opts):
868 868 """benchmark time needed to analyse phases of the remote server"""
869 869 from mercurial.node import (
870 870 bin,
871 871 )
872 872 from mercurial import (
873 873 exchange,
874 874 hg,
875 875 phases,
876 876 )
877 877 opts = _byteskwargs(opts)
878 878 timer, fm = gettimer(ui, opts)
879 879
880 880 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
881 881 if not path:
882 882 raise error.Abort((b'default repository not configured!'),
883 883 hint=(b"see 'hg help config.paths'"))
884 884 dest = path.pushloc or path.loc
885 885 branches = (path.branch, opts.get(b'branch') or [])
886 886 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
887 887 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
888 888 other = hg.peer(repo, opts, dest)
889 889
890 890 # easier to perform discovery through the operation
891 891 op = exchange.pushoperation(repo, other)
892 892 exchange._pushdiscoverychangeset(op)
893 893
894 894 remotesubset = op.fallbackheads
895 895
896 896 with other.commandexecutor() as e:
897 897 remotephases = e.callcommand(b'listkeys',
898 898 {b'namespace': b'phases'}).result()
899 899 del other
900 900 publishing = remotephases.get(b'publishing', False)
901 901 if publishing:
902 902 ui.status((b'publishing: yes\n'))
903 903 else:
904 904 ui.status((b'publishing: no\n'))
905 905
906 906 nodemap = repo.changelog.nodemap
907 907 nonpublishroots = 0
908 908 for nhex, phase in remotephases.iteritems():
909 909 if nhex == b'publishing': # ignore data related to publish option
910 910 continue
911 911 node = bin(nhex)
912 912 if node in nodemap and int(phase):
913 913 nonpublishroots += 1
914 914 ui.status((b'number of roots: %d\n') % len(remotephases))
915 915 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
916 916 def d():
917 917 phases.remotephasessummary(repo,
918 918 remotesubset,
919 919 remotephases)
920 920 timer(d)
921 921 fm.end()
922 922
923 923 @command(b'perfmanifest',[
924 924 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
925 925 (b'', b'clear-disk', False, b'clear on-disk caches too'),
926 926 ] + formatteropts, b'REV|NODE')
927 927 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
928 928 """benchmark the time to read a manifest from disk and return a usable
929 929 dict-like object
930 930
931 931 Manifest caches are cleared before retrieval."""
932 932 opts = _byteskwargs(opts)
933 933 timer, fm = gettimer(ui, opts)
934 934 if not manifest_rev:
935 935 ctx = scmutil.revsingle(repo, rev, rev)
936 936 t = ctx.manifestnode()
937 937 else:
938 938 from mercurial.node import bin
939 939
940 940 if len(rev) == 40:
941 941 t = bin(rev)
942 942 else:
943 943 try:
944 944 rev = int(rev)
945 945
946 946 if util.safehasattr(repo.manifestlog, b'getstorage'):
947 947 t = repo.manifestlog.getstorage(b'').node(rev)
948 948 else:
949 949 t = repo.manifestlog._revlog.lookup(rev)
950 950 except ValueError:
951 951 raise error.Abort(b'manifest revision must be integer or full '
952 952 b'node')
953 953 def d():
954 954 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
955 955 repo.manifestlog[t].read()
956 956 timer(d)
957 957 fm.end()
958 958
959 959 @command(b'perfchangeset', formatteropts)
960 960 def perfchangeset(ui, repo, rev, **opts):
961 961 opts = _byteskwargs(opts)
962 962 timer, fm = gettimer(ui, opts)
963 963 n = scmutil.revsingle(repo, rev).node()
964 964 def d():
965 965 repo.changelog.read(n)
966 966 #repo.changelog._cache = None
967 967 timer(d)
968 968 fm.end()
969 969
970 970 @command(b'perfindex', formatteropts)
971 971 def perfindex(ui, repo, **opts):
972 972 import mercurial.revlog
973 973 opts = _byteskwargs(opts)
974 974 timer, fm = gettimer(ui, opts)
975 975 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
976 976 n = repo[b"tip"].node()
977 977 svfs = getsvfs(repo)
978 978 def d():
979 979 cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
980 980 cl.rev(n)
981 981 timer(d)
982 982 fm.end()
983 983
984 984 @command(b'perfstartup', formatteropts)
985 985 def perfstartup(ui, repo, **opts):
986 986 opts = _byteskwargs(opts)
987 987 timer, fm = gettimer(ui, opts)
988 988 def d():
989 989 if os.name != r'nt':
990 990 os.system(b"HGRCPATH= %s version -q > /dev/null" %
991 991 fsencode(sys.argv[0]))
992 992 else:
993 993 os.environ[r'HGRCPATH'] = r' '
994 994 os.system(r"%s version -q > NUL" % sys.argv[0])
995 995 timer(d)
996 996 fm.end()
997 997
998 998 @command(b'perfparents', formatteropts)
999 999 def perfparents(ui, repo, **opts):
1000 1000 opts = _byteskwargs(opts)
1001 1001 timer, fm = gettimer(ui, opts)
1002 1002 # control the number of commits perfparents iterates over
1003 1003 # experimental config: perf.parentscount
1004 1004 count = getint(ui, b"perf", b"parentscount", 1000)
1005 1005 if len(repo.changelog) < count:
1006 1006 raise error.Abort(b"repo needs %d commits for this test" % count)
1007 1007 repo = repo.unfiltered()
1008 1008 nl = [repo.changelog.node(i) for i in _xrange(count)]
1009 1009 def d():
1010 1010 for n in nl:
1011 1011 repo.changelog.parents(n)
1012 1012 timer(d)
1013 1013 fm.end()
1014 1014
1015 1015 @command(b'perfctxfiles', formatteropts)
1016 1016 def perfctxfiles(ui, repo, x, **opts):
1017 1017 opts = _byteskwargs(opts)
1018 1018 x = int(x)
1019 1019 timer, fm = gettimer(ui, opts)
1020 1020 def d():
1021 1021 len(repo[x].files())
1022 1022 timer(d)
1023 1023 fm.end()
1024 1024
1025 1025 @command(b'perfrawfiles', formatteropts)
1026 1026 def perfrawfiles(ui, repo, x, **opts):
1027 1027 opts = _byteskwargs(opts)
1028 1028 x = int(x)
1029 1029 timer, fm = gettimer(ui, opts)
1030 1030 cl = repo.changelog
1031 1031 def d():
1032 1032 len(cl.read(x)[3])
1033 1033 timer(d)
1034 1034 fm.end()
1035 1035
1036 1036 @command(b'perflookup', formatteropts)
1037 1037 def perflookup(ui, repo, rev, **opts):
1038 1038 opts = _byteskwargs(opts)
1039 1039 timer, fm = gettimer(ui, opts)
1040 1040 timer(lambda: len(repo.lookup(rev)))
1041 1041 fm.end()
1042 1042
1043 1043 @command(b'perflinelogedits',
1044 1044 [(b'n', b'edits', 10000, b'number of edits'),
1045 1045 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1046 1046 ], norepo=True)
1047 1047 def perflinelogedits(ui, **opts):
1048 1048 from mercurial import linelog
1049 1049
1050 1050 opts = _byteskwargs(opts)
1051 1051
1052 1052 edits = opts[b'edits']
1053 1053 maxhunklines = opts[b'max_hunk_lines']
1054 1054
1055 1055 maxb1 = 100000
1056 1056 random.seed(0)
1057 1057 randint = random.randint
1058 1058 currentlines = 0
1059 1059 arglist = []
1060 1060 for rev in _xrange(edits):
1061 1061 a1 = randint(0, currentlines)
1062 1062 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1063 1063 b1 = randint(0, maxb1)
1064 1064 b2 = randint(b1, b1 + maxhunklines)
1065 1065 currentlines += (b2 - b1) - (a2 - a1)
1066 1066 arglist.append((rev, a1, a2, b1, b2))
1067 1067
1068 1068 def d():
1069 1069 ll = linelog.linelog()
1070 1070 for args in arglist:
1071 1071 ll.replacelines(*args)
1072 1072
1073 1073 timer, fm = gettimer(ui, opts)
1074 1074 timer(d)
1075 1075 fm.end()
1076 1076
1077 1077 @command(b'perfrevrange', formatteropts)
1078 1078 def perfrevrange(ui, repo, *specs, **opts):
1079 1079 opts = _byteskwargs(opts)
1080 1080 timer, fm = gettimer(ui, opts)
1081 1081 revrange = scmutil.revrange
1082 1082 timer(lambda: len(revrange(repo, specs)))
1083 1083 fm.end()
1084 1084
1085 1085 @command(b'perfnodelookup', formatteropts)
1086 1086 def perfnodelookup(ui, repo, rev, **opts):
1087 1087 opts = _byteskwargs(opts)
1088 1088 timer, fm = gettimer(ui, opts)
1089 1089 import mercurial.revlog
1090 1090 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1091 1091 n = scmutil.revsingle(repo, rev).node()
1092 1092 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1093 1093 def d():
1094 1094 cl.rev(n)
1095 1095 clearcaches(cl)
1096 1096 timer(d)
1097 1097 fm.end()
1098 1098
1099 1099 @command(b'perflog',
1100 1100 [(b'', b'rename', False, b'ask log to follow renames')
1101 1101 ] + formatteropts)
1102 1102 def perflog(ui, repo, rev=None, **opts):
1103 1103 opts = _byteskwargs(opts)
1104 1104 if rev is None:
1105 1105 rev=[]
1106 1106 timer, fm = gettimer(ui, opts)
1107 1107 ui.pushbuffer()
1108 1108 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1109 1109 copies=opts.get(b'rename')))
1110 1110 ui.popbuffer()
1111 1111 fm.end()
1112 1112
1113 1113 @command(b'perfmoonwalk', formatteropts)
1114 1114 def perfmoonwalk(ui, repo, **opts):
1115 1115 """benchmark walking the changelog backwards
1116 1116
1117 1117 This also loads the changelog data for each revision in the changelog.
1118 1118 """
1119 1119 opts = _byteskwargs(opts)
1120 1120 timer, fm = gettimer(ui, opts)
1121 1121 def moonwalk():
1122 1122 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1123 1123 ctx = repo[i]
1124 1124 ctx.branch() # read changelog data (in addition to the index)
1125 1125 timer(moonwalk)
1126 1126 fm.end()
1127 1127
1128 1128 @command(b'perftemplating',
1129 1129 [(b'r', b'rev', [], b'revisions to run the template on'),
1130 1130 ] + formatteropts)
1131 1131 def perftemplating(ui, repo, testedtemplate=None, **opts):
1132 1132 """test the rendering time of a given template"""
1133 1133 if makelogtemplater is None:
1134 1134 raise error.Abort((b"perftemplating not available with this Mercurial"),
1135 1135 hint=b"use 4.3 or later")
1136 1136
1137 1137 opts = _byteskwargs(opts)
1138 1138
1139 1139 nullui = ui.copy()
1140 1140 nullui.fout = open(os.devnull, r'wb')
1141 1141 nullui.disablepager()
1142 1142 revs = opts.get(b'rev')
1143 1143 if not revs:
1144 1144 revs = [b'all()']
1145 1145 revs = list(scmutil.revrange(repo, revs))
1146 1146
1147 1147 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1148 1148 b' {author|person}: {desc|firstline}\n')
1149 1149 if testedtemplate is None:
1150 1150 testedtemplate = defaulttemplate
1151 1151 displayer = makelogtemplater(nullui, repo, testedtemplate)
1152 1152 def format():
1153 1153 for r in revs:
1154 1154 ctx = repo[r]
1155 1155 displayer.show(ctx)
1156 1156 displayer.flush(ctx)
1157 1157
1158 1158 timer, fm = gettimer(ui, opts)
1159 1159 timer(format)
1160 1160 fm.end()
1161 1161
1162 1162 @command(b'perfhelper-tracecopies', formatteropts +
1163 1163 [
1164 1164 (b'r', b'revs', [], b'restrict search to these revisions'),
1165 1165 ])
1166 1166 def perfhelpertracecopies(ui, repo, revs=[], **opts):
1167 1167 """find statistic about potential parameters for the `perftracecopies`
1168 1168
1169 1169 This command find source-destination pair relevant for copytracing testing.
1170 1170 It report value for some of the parameters that impact copy tracing time.
1171 1171 """
1172 1172 opts = _byteskwargs(opts)
1173 1173 fm = ui.formatter(b'perf', opts)
1174 1174 header = '%12s %12s %12s %12s\n'
1175 1175 output = ("%(source)12s %(destination)12s "
1176 1176 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1177 1177 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1178 1178
1179 1179 if not revs:
1180 1180 revs = ['all()']
1181 1181 revs = scmutil.revrange(repo, revs)
1182 1182
1183 1183 roi = repo.revs('merge() and %ld', revs)
1184 1184 for r in roi:
1185 1185 ctx = repo[r]
1186 1186 p1 = ctx.p1().rev()
1187 1187 p2 = ctx.p2().rev()
1188 1188 bases = repo.changelog._commonancestorsheads(p1, p2)
1189 1189 for p in (p1, p2):
1190 1190 for b in bases:
1191 1191 base = repo[b]
1192 1192 parent = repo[p]
1193 1193 missing = copies._computeforwardmissing(base, parent)
1194 1194 if not missing:
1195 1195 continue
1196 1196 fm.startitem()
1197 1197 data = {
1198 1198 b'source': base.hex(),
1199 1199 b'destination': parent.hex(),
1200 1200 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1201 1201 b'nbmissingfiles': len(missing),
1202 1202 }
1203 1203 fm.data(**data)
1204 1204 out = data.copy()
1205 1205 out['source'] = fm.hexfunc(base.node())
1206 1206 out['destination'] = fm.hexfunc(parent.node())
1207 1207 fm.plain(output % out)
1208 1208 fm.end()
1209 1209
1210 1210 @command(b'perfcca', formatteropts)
1211 1211 def perfcca(ui, repo, **opts):
1212 1212 opts = _byteskwargs(opts)
1213 1213 timer, fm = gettimer(ui, opts)
1214 1214 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1215 1215 fm.end()
1216 1216
1217 1217 @command(b'perffncacheload', formatteropts)
1218 1218 def perffncacheload(ui, repo, **opts):
1219 1219 opts = _byteskwargs(opts)
1220 1220 timer, fm = gettimer(ui, opts)
1221 1221 s = repo.store
1222 1222 def d():
1223 1223 s.fncache._load()
1224 1224 timer(d)
1225 1225 fm.end()
1226 1226
1227 1227 @command(b'perffncachewrite', formatteropts)
1228 1228 def perffncachewrite(ui, repo, **opts):
1229 1229 opts = _byteskwargs(opts)
1230 1230 timer, fm = gettimer(ui, opts)
1231 1231 s = repo.store
1232 1232 lock = repo.lock()
1233 1233 s.fncache._load()
1234 1234 tr = repo.transaction(b'perffncachewrite')
1235 1235 tr.addbackup(b'fncache')
1236 1236 def d():
1237 1237 s.fncache._dirty = True
1238 1238 s.fncache.write(tr)
1239 1239 timer(d)
1240 1240 tr.close()
1241 1241 lock.release()
1242 1242 fm.end()
1243 1243
1244 1244 @command(b'perffncacheencode', formatteropts)
1245 1245 def perffncacheencode(ui, repo, **opts):
1246 1246 opts = _byteskwargs(opts)
1247 1247 timer, fm = gettimer(ui, opts)
1248 1248 s = repo.store
1249 1249 s.fncache._load()
1250 1250 def d():
1251 1251 for p in s.fncache.entries:
1252 1252 s.encode(p)
1253 1253 timer(d)
1254 1254 fm.end()
1255 1255
1256 1256 def _bdiffworker(q, blocks, xdiff, ready, done):
1257 1257 while not done.is_set():
1258 1258 pair = q.get()
1259 1259 while pair is not None:
1260 1260 if xdiff:
1261 1261 mdiff.bdiff.xdiffblocks(*pair)
1262 1262 elif blocks:
1263 1263 mdiff.bdiff.blocks(*pair)
1264 1264 else:
1265 1265 mdiff.textdiff(*pair)
1266 1266 q.task_done()
1267 1267 pair = q.get()
1268 1268 q.task_done() # for the None one
1269 1269 with ready:
1270 1270 ready.wait()
1271 1271
1272 1272 def _manifestrevision(repo, mnode):
1273 1273 ml = repo.manifestlog
1274 1274
1275 1275 if util.safehasattr(ml, b'getstorage'):
1276 1276 store = ml.getstorage(b'')
1277 1277 else:
1278 1278 store = ml._revlog
1279 1279
1280 1280 return store.revision(mnode)
1281 1281
1282 1282 @command(b'perfbdiff', revlogopts + formatteropts + [
1283 1283 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1284 1284 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1285 1285 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
1286 1286 (b'', b'blocks', False, b'test computing diffs into blocks'),
1287 1287 (b'', b'xdiff', False, b'use xdiff algorithm'),
1288 1288 ],
1289 1289
1290 1290 b'-c|-m|FILE REV')
1291 1291 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1292 1292 """benchmark a bdiff between revisions
1293 1293
1294 1294 By default, benchmark a bdiff between its delta parent and itself.
1295 1295
1296 1296 With ``--count``, benchmark bdiffs between delta parents and self for N
1297 1297 revisions starting at the specified revision.
1298 1298
1299 1299 With ``--alldata``, assume the requested revision is a changeset and
1300 1300 measure bdiffs for all changes related to that changeset (manifest
1301 1301 and filelogs).
1302 1302 """
1303 1303 opts = _byteskwargs(opts)
1304 1304
1305 1305 if opts[b'xdiff'] and not opts[b'blocks']:
1306 1306 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1307 1307
1308 1308 if opts[b'alldata']:
1309 1309 opts[b'changelog'] = True
1310 1310
1311 1311 if opts.get(b'changelog') or opts.get(b'manifest'):
1312 1312 file_, rev = None, file_
1313 1313 elif rev is None:
1314 1314 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1315 1315
1316 1316 blocks = opts[b'blocks']
1317 1317 xdiff = opts[b'xdiff']
1318 1318 textpairs = []
1319 1319
1320 1320 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1321 1321
1322 1322 startrev = r.rev(r.lookup(rev))
1323 1323 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1324 1324 if opts[b'alldata']:
1325 1325 # Load revisions associated with changeset.
1326 1326 ctx = repo[rev]
1327 1327 mtext = _manifestrevision(repo, ctx.manifestnode())
1328 1328 for pctx in ctx.parents():
1329 1329 pman = _manifestrevision(repo, pctx.manifestnode())
1330 1330 textpairs.append((pman, mtext))
1331 1331
1332 1332 # Load filelog revisions by iterating manifest delta.
1333 1333 man = ctx.manifest()
1334 1334 pman = ctx.p1().manifest()
1335 1335 for filename, change in pman.diff(man).items():
1336 1336 fctx = repo.file(filename)
1337 1337 f1 = fctx.revision(change[0][0] or -1)
1338 1338 f2 = fctx.revision(change[1][0] or -1)
1339 1339 textpairs.append((f1, f2))
1340 1340 else:
1341 1341 dp = r.deltaparent(rev)
1342 1342 textpairs.append((r.revision(dp), r.revision(rev)))
1343 1343
1344 1344 withthreads = threads > 0
1345 1345 if not withthreads:
1346 1346 def d():
1347 1347 for pair in textpairs:
1348 1348 if xdiff:
1349 1349 mdiff.bdiff.xdiffblocks(*pair)
1350 1350 elif blocks:
1351 1351 mdiff.bdiff.blocks(*pair)
1352 1352 else:
1353 1353 mdiff.textdiff(*pair)
1354 1354 else:
1355 1355 q = queue()
1356 1356 for i in _xrange(threads):
1357 1357 q.put(None)
1358 1358 ready = threading.Condition()
1359 1359 done = threading.Event()
1360 1360 for i in _xrange(threads):
1361 1361 threading.Thread(target=_bdiffworker,
1362 1362 args=(q, blocks, xdiff, ready, done)).start()
1363 1363 q.join()
1364 1364 def d():
1365 1365 for pair in textpairs:
1366 1366 q.put(pair)
1367 1367 for i in _xrange(threads):
1368 1368 q.put(None)
1369 1369 with ready:
1370 1370 ready.notify_all()
1371 1371 q.join()
1372 1372 timer, fm = gettimer(ui, opts)
1373 1373 timer(d)
1374 1374 fm.end()
1375 1375
1376 1376 if withthreads:
1377 1377 done.set()
1378 1378 for i in _xrange(threads):
1379 1379 q.put(None)
1380 1380 with ready:
1381 1381 ready.notify_all()
1382 1382
1383 1383 @command(b'perfunidiff', revlogopts + formatteropts + [
1384 1384 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1385 1385 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1386 1386 ], b'-c|-m|FILE REV')
1387 1387 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1388 1388 """benchmark a unified diff between revisions
1389 1389
1390 1390 This doesn't include any copy tracing - it's just a unified diff
1391 1391 of the texts.
1392 1392
1393 1393 By default, benchmark a diff between its delta parent and itself.
1394 1394
1395 1395 With ``--count``, benchmark diffs between delta parents and self for N
1396 1396 revisions starting at the specified revision.
1397 1397
1398 1398 With ``--alldata``, assume the requested revision is a changeset and
1399 1399 measure diffs for all changes related to that changeset (manifest
1400 1400 and filelogs).
1401 1401 """
1402 1402 opts = _byteskwargs(opts)
1403 1403 if opts[b'alldata']:
1404 1404 opts[b'changelog'] = True
1405 1405
1406 1406 if opts.get(b'changelog') or opts.get(b'manifest'):
1407 1407 file_, rev = None, file_
1408 1408 elif rev is None:
1409 1409 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1410 1410
1411 1411 textpairs = []
1412 1412
1413 1413 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1414 1414
1415 1415 startrev = r.rev(r.lookup(rev))
1416 1416 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1417 1417 if opts[b'alldata']:
1418 1418 # Load revisions associated with changeset.
1419 1419 ctx = repo[rev]
1420 1420 mtext = _manifestrevision(repo, ctx.manifestnode())
1421 1421 for pctx in ctx.parents():
1422 1422 pman = _manifestrevision(repo, pctx.manifestnode())
1423 1423 textpairs.append((pman, mtext))
1424 1424
1425 1425 # Load filelog revisions by iterating manifest delta.
1426 1426 man = ctx.manifest()
1427 1427 pman = ctx.p1().manifest()
1428 1428 for filename, change in pman.diff(man).items():
1429 1429 fctx = repo.file(filename)
1430 1430 f1 = fctx.revision(change[0][0] or -1)
1431 1431 f2 = fctx.revision(change[1][0] or -1)
1432 1432 textpairs.append((f1, f2))
1433 1433 else:
1434 1434 dp = r.deltaparent(rev)
1435 1435 textpairs.append((r.revision(dp), r.revision(rev)))
1436 1436
1437 1437 def d():
1438 1438 for left, right in textpairs:
1439 1439 # The date strings don't matter, so we pass empty strings.
1440 1440 headerlines, hunks = mdiff.unidiff(
1441 1441 left, b'', right, b'', b'left', b'right', binary=False)
1442 1442 # consume iterators in roughly the way patch.py does
1443 1443 b'\n'.join(headerlines)
1444 1444 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1445 1445 timer, fm = gettimer(ui, opts)
1446 1446 timer(d)
1447 1447 fm.end()
1448 1448
1449 1449 @command(b'perfdiffwd', formatteropts)
1450 1450 def perfdiffwd(ui, repo, **opts):
1451 1451 """Profile diff of working directory changes"""
1452 1452 opts = _byteskwargs(opts)
1453 1453 timer, fm = gettimer(ui, opts)
1454 1454 options = {
1455 1455 'w': 'ignore_all_space',
1456 1456 'b': 'ignore_space_change',
1457 1457 'B': 'ignore_blank_lines',
1458 1458 }
1459 1459
1460 1460 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1461 1461 opts = dict((options[c], b'1') for c in diffopt)
1462 1462 def d():
1463 1463 ui.pushbuffer()
1464 1464 commands.diff(ui, repo, **opts)
1465 1465 ui.popbuffer()
1466 1466 diffopt = diffopt.encode('ascii')
1467 1467 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1468 1468 timer(d, title=title)
1469 1469 fm.end()
1470 1470
1471 1471 @command(b'perfrevlogindex', revlogopts + formatteropts,
1472 1472 b'-c|-m|FILE')
1473 1473 def perfrevlogindex(ui, repo, file_=None, **opts):
1474 1474 """Benchmark operations against a revlog index.
1475 1475
1476 1476 This tests constructing a revlog instance, reading index data,
1477 1477 parsing index data, and performing various operations related to
1478 1478 index data.
1479 1479 """
1480 1480
1481 1481 opts = _byteskwargs(opts)
1482 1482
1483 1483 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1484 1484
1485 1485 opener = getattr(rl, 'opener') # trick linter
1486 1486 indexfile = rl.indexfile
1487 1487 data = opener.read(indexfile)
1488 1488
1489 1489 header = struct.unpack(b'>I', data[0:4])[0]
1490 1490 version = header & 0xFFFF
1491 1491 if version == 1:
1492 1492 revlogio = revlog.revlogio()
1493 1493 inline = header & (1 << 16)
1494 1494 else:
1495 1495 raise error.Abort((b'unsupported revlog version: %d') % version)
1496 1496
1497 1497 rllen = len(rl)
1498 1498
1499 1499 node0 = rl.node(0)
1500 1500 node25 = rl.node(rllen // 4)
1501 1501 node50 = rl.node(rllen // 2)
1502 1502 node75 = rl.node(rllen // 4 * 3)
1503 1503 node100 = rl.node(rllen - 1)
1504 1504
1505 1505 allrevs = range(rllen)
1506 1506 allrevsrev = list(reversed(allrevs))
1507 1507 allnodes = [rl.node(rev) for rev in range(rllen)]
1508 1508 allnodesrev = list(reversed(allnodes))
1509 1509
1510 1510 def constructor():
1511 1511 revlog.revlog(opener, indexfile)
1512 1512
1513 1513 def read():
1514 1514 with opener(indexfile) as fh:
1515 1515 fh.read()
1516 1516
1517 1517 def parseindex():
1518 1518 revlogio.parseindex(data, inline)
1519 1519
1520 1520 def getentry(revornode):
1521 1521 index = revlogio.parseindex(data, inline)[0]
1522 1522 index[revornode]
1523 1523
1524 1524 def getentries(revs, count=1):
1525 1525 index = revlogio.parseindex(data, inline)[0]
1526 1526
1527 1527 for i in range(count):
1528 1528 for rev in revs:
1529 1529 index[rev]
1530 1530
1531 1531 def resolvenode(node):
1532 1532 nodemap = revlogio.parseindex(data, inline)[1]
1533 1533 # This only works for the C code.
1534 1534 if nodemap is None:
1535 1535 return
1536 1536
1537 1537 try:
1538 1538 nodemap[node]
1539 1539 except error.RevlogError:
1540 1540 pass
1541 1541
1542 1542 def resolvenodes(nodes, count=1):
1543 1543 nodemap = revlogio.parseindex(data, inline)[1]
1544 1544 if nodemap is None:
1545 1545 return
1546 1546
1547 1547 for i in range(count):
1548 1548 for node in nodes:
1549 1549 try:
1550 1550 nodemap[node]
1551 1551 except error.RevlogError:
1552 1552 pass
1553 1553
1554 1554 benches = [
1555 1555 (constructor, b'revlog constructor'),
1556 1556 (read, b'read'),
1557 1557 (parseindex, b'create index object'),
1558 1558 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1559 1559 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1560 1560 (lambda: resolvenode(node0), b'look up node at rev 0'),
1561 1561 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1562 1562 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1563 1563 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1564 1564 (lambda: resolvenode(node100), b'look up node at tip'),
1565 1565 # 2x variation is to measure caching impact.
1566 1566 (lambda: resolvenodes(allnodes),
1567 1567 b'look up all nodes (forward)'),
1568 1568 (lambda: resolvenodes(allnodes, 2),
1569 1569 b'look up all nodes 2x (forward)'),
1570 1570 (lambda: resolvenodes(allnodesrev),
1571 1571 b'look up all nodes (reverse)'),
1572 1572 (lambda: resolvenodes(allnodesrev, 2),
1573 1573 b'look up all nodes 2x (reverse)'),
1574 1574 (lambda: getentries(allrevs),
1575 1575 b'retrieve all index entries (forward)'),
1576 1576 (lambda: getentries(allrevs, 2),
1577 1577 b'retrieve all index entries 2x (forward)'),
1578 1578 (lambda: getentries(allrevsrev),
1579 1579 b'retrieve all index entries (reverse)'),
1580 1580 (lambda: getentries(allrevsrev, 2),
1581 1581 b'retrieve all index entries 2x (reverse)'),
1582 1582 ]
1583 1583
1584 1584 for fn, title in benches:
1585 1585 timer, fm = gettimer(ui, opts)
1586 1586 timer(fn, title=title)
1587 1587 fm.end()
1588 1588
1589 1589 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1590 1590 [(b'd', b'dist', 100, b'distance between the revisions'),
1591 1591 (b's', b'startrev', 0, b'revision to start reading at'),
1592 1592 (b'', b'reverse', False, b'read in reverse')],
1593 1593 b'-c|-m|FILE')
1594 1594 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1595 1595 **opts):
1596 1596 """Benchmark reading a series of revisions from a revlog.
1597 1597
1598 1598 By default, we read every ``-d/--dist`` revision from 0 to tip of
1599 1599 the specified revlog.
1600 1600
1601 1601 The start revision can be defined via ``-s/--startrev``.
1602 1602 """
1603 1603 opts = _byteskwargs(opts)
1604 1604
1605 1605 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1606 1606 rllen = getlen(ui)(rl)
1607 1607
1608 1608 if startrev < 0:
1609 1609 startrev = rllen + startrev
1610 1610
1611 1611 def d():
1612 1612 rl.clearcaches()
1613 1613
1614 1614 beginrev = startrev
1615 1615 endrev = rllen
1616 1616 dist = opts[b'dist']
1617 1617
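# with --reverse, walk from the last revision down towards startrev using a negative stride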
1618 1618 if reverse:
1619 1619 beginrev, endrev = endrev - 1, beginrev - 1
1620 1620 dist = -1 * dist
1621 1621
1622 1622 for x in _xrange(beginrev, endrev, dist):
1623 1623 # Old Mercurial versions don't support passing an int to revision().
1624 1624 n = rl.node(x)
1625 1625 rl.revision(n)
1626 1626
1627 1627 timer, fm = gettimer(ui, opts)
1628 1628 timer(d)
1629 1629 fm.end()
1630 1630
1631 1631 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1632 1632 [(b's', b'startrev', 1000, b'revision to start writing at'),
1633 1633 (b'', b'stoprev', -1, b'last revision to write'),
1634 1634 (b'', b'count', 3, b'number of runs to perform'),
1635 1635 (b'', b'details', False, b'print timing for every revision tested'),
1636 1636 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
1637 1637 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1638 1638 ],
1639 1639 b'-c|-m|FILE')
1640 1640 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1641 1641 """Benchmark writing a series of revisions to a revlog.
1642 1642
1643 1643 Possible source values are:
1644 1644 * `full`: add from a full text (default).
1645 1645 * `parent-1`: add from a delta to the first parent
1646 1646 * `parent-2`: add from a delta to the second parent if it exists
1647 1647 (use a delta from the first parent otherwise)
1648 1648 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1649 1649 * `storage`: add from the existing precomputed deltas
1650 1650 """
1651 1651 opts = _byteskwargs(opts)
1652 1652
1653 1653 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1654 1654 rllen = getlen(ui)(rl)
1655 1655 if startrev < 0:
1656 1656 startrev = rllen + startrev
1657 1657 if stoprev < 0:
1658 1658 stoprev = rllen + stoprev
1659 1659
1660 1660 lazydeltabase = opts['lazydeltabase']
1661 1661 source = opts['source']
1662 1662 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1663 1663 b'storage')
1664 1664 if source not in validsource:
1665 1665 raise error.Abort('invalid source type: %s' % source)
1666 1666
1667 1667 ### actually gather results
1668 1668 count = opts['count']
1669 1669 if count <= 0:
1670 1670 raise error.Abort('invalid run count: %d' % count)
1671 1671 allresults = []
1672 1672 for c in range(count):
1673 1673 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1674 1674 lazydeltabase=lazydeltabase)
1675 1675 allresults.append(timing)
1676 1676
1677 1677 ### consolidate the results in a single list
1678 1678 results = []
1679 1679 for idx, (rev, t) in enumerate(allresults[0]):
1680 1680 ts = [t]
1681 1681 for other in allresults[1:]:
1682 1682 orev, ot = other[idx]
1683 1683 assert orev == rev
1684 1684 ts.append(ot)
1685 1685 results.append((rev, ts))
1686 1686 resultcount = len(results)
1687 1687
1688 1688 ### Compute and display relevant statistics
1689 1689
1690 1690 # get a formatter
1691 1691 fm = ui.formatter(b'perf', opts)
1692 1692 displayall = ui.configbool(b"perf", b"all-timing", False)
1693 1693
1694 1694 # print individual details if requested
1695 1695 if opts['details']:
1696 1696 for idx, item in enumerate(results, 1):
1697 1697 rev, data = item
1698 1698 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
1699 1699 formatone(fm, data, title=title, displayall=displayall)
1700 1700
1701 1701 # sorts results by median time
1702 1702 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1703 1703 # list of (name, index) to display
1704 1704 relevants = [
1705 1705 ("min", 0),
1706 1706 ("10%", resultcount * 10 // 100),
1707 1707 ("25%", resultcount * 25 // 100),
1708 1708 ("50%", resultcount * 70 // 100),
1709 1709 ("75%", resultcount * 75 // 100),
1710 1710 ("90%", resultcount * 90 // 100),
1711 1711 ("95%", resultcount * 95 // 100),
1712 1712 ("99%", resultcount * 99 // 100),
1713 1713 ("max", -1),
1714 1714 ]
1715 1715 if not ui.quiet:
1716 1716 for name, idx in relevants:
1717 1717 data = results[idx]
1718 1718 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1719 1719 formatone(fm, data[1], title=title, displayall=displayall)
1720 1720
1721 1721 # XXX summing that many floats will not be very precise; we ignore this
1722 1722 # fact for now
1723 1723 totaltime = []
1724 1724 for item in allresults:
1725 1725 totaltime.append((sum(x[1][0] for x in item),
1726 1726 sum(x[1][1] for x in item),
1727 1727 sum(x[1][2] for x in item),)
1728 1728 )
1729 1729 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1730 1730 displayall=displayall)
1731 1731 fm.end()
1732 1732
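# minimal transaction stand-in: the revlog only calls tr.add() while adding revisions here, so this no-op stub lets the benchmark write without opening a real transaction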
1733 1733 class _faketr(object):
1734 1734 def add(s, x, y, z=None):
1735 1735 return None
1736 1736
1737 1737 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
1738 1738 lazydeltabase=True):
1739 1739 timings = []
1740 1740 tr = _faketr()
1741 1741 with _temprevlog(ui, orig, startrev) as dest:
1742 1742 dest._lazydeltabase = lazydeltabase
1743 1743 revs = list(orig.revs(startrev, stoprev))
1744 1744 total = len(revs)
1745 1745 topic = 'adding'
1746 1746 if runidx is not None:
1747 1747 topic += ' (run #%d)' % runidx
1748 1748 for idx, rev in enumerate(revs):
1749 1749 ui.progress(topic, idx, unit='revs', total=total)
1750 1750 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
1751 1751 with timeone() as r:
1752 1752 dest.addrawrevision(*addargs, **addkwargs)
1753 1753 timings.append((rev, r[0]))
1754 1754 ui.progress(topic, total, unit='revs', total=total)
1755 1755 ui.progress(topic, None, unit='revs', total=total)
1756 1756 return timings
1757 1757
1758 1758 def _getrevisionseed(orig, rev, tr, source):
1759 1759 from mercurial.node import nullid
1760 1760
1761 1761 linkrev = orig.linkrev(rev)
1762 1762 node = orig.node(rev)
1763 1763 p1, p2 = orig.parents(node)
1764 1764 flags = orig.flags(rev)
1765 1765 cachedelta = None
1766 1766 text = None
1767 1767
1768 1768 if source == b'full':
1769 1769 text = orig.revision(rev)
1770 1770 elif source == b'parent-1':
1771 1771 baserev = orig.rev(p1)
1772 1772 cachedelta = (baserev, orig.revdiff(p1, rev))
1773 1773 elif source == b'parent-2':
1774 1774 parent = p2
1775 1775 if p2 == nullid:
1776 1776 parent = p1
1777 1777 baserev = orig.rev(parent)
1778 1778 cachedelta = (baserev, orig.revdiff(parent, rev))
1779 1779 elif source == b'parent-smallest':
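# compute deltas against both parents and feed in whichever is smaller (p1 when there is no second parent)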
1780 1780 p1diff = orig.revdiff(p1, rev)
1781 1781 parent = p1
1782 1782 diff = p1diff
1783 1783 if p2 != nullid:
1784 1784 p2diff = orig.revdiff(p2, rev)
1785 1785 if len(p1diff) > len(p2diff):
1786 1786 parent = p2
1787 1787 diff = p2diff
1788 1788 baserev = orig.rev(parent)
1789 1789 cachedelta = (baserev, diff)
1790 1790 elif source == b'storage':
1791 1791 baserev = orig.deltaparent(rev)
1792 1792 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
1793 1793
1794 1794 return ((text, tr, linkrev, p1, p2),
1795 1795 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1796 1796
1797 1797 @contextlib.contextmanager
1798 1798 def _temprevlog(ui, orig, truncaterev):
1799 1799 from mercurial import vfs as vfsmod
1800 1800
1801 1801 if orig._inline:
1802 1802 raise error.Abort('not supporting inline revlog (yet)')
1803 1803
1804 1804 origindexpath = orig.opener.join(orig.indexfile)
1805 1805 origdatapath = orig.opener.join(orig.datafile)
1806 1806 indexname = 'revlog.i'
1807 1807 dataname = 'revlog.d'
1808 1808
1809 1809 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
1810 1810 try:
1811 1811 # copy the data file in a temporary directory
1812 1812 ui.debug('copying data in %s\n' % tmpdir)
1813 1813 destindexpath = os.path.join(tmpdir, 'revlog.i')
1814 1814 destdatapath = os.path.join(tmpdir, 'revlog.d')
1815 1815 shutil.copyfile(origindexpath, destindexpath)
1816 1816 shutil.copyfile(origdatapath, destdatapath)
1817 1817
1818 1818 # remove the data we want to add again
1819 1819 ui.debug('truncating data to be rewritten\n')
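# index entries have a fixed size, so revision N starts at N * entry size in the index; the data file is cut at the byte offset where revision N's data begins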
1820 1820 with open(destindexpath, 'ab') as index:
1821 1821 index.seek(0)
1822 1822 index.truncate(truncaterev * orig._io.size)
1823 1823 with open(destdatapath, 'ab') as data:
1824 1824 data.seek(0)
1825 1825 data.truncate(orig.start(truncaterev))
1826 1826
1827 1827 # instantiate a new revlog from the temporary copy
1828 1828 ui.debug('instantiating revlog from the truncated copy\n')
1829 1829 vfs = vfsmod.vfs(tmpdir)
1830 1830 vfs.options = getattr(orig.opener, 'options', None)
1831 1831
1832 1832 dest = revlog.revlog(vfs,
1833 1833 indexfile=indexname,
1834 1834 datafile=dataname)
1835 1835 if dest._inline:
1836 1836 raise error.Abort('not supporting inline revlog (yet)')
1837 1837 # make sure internals are initialized
1838 1838 dest.revision(len(dest) - 1)
1839 1839 yield dest
1840 1840 del dest, vfs
1841 1841 finally:
1842 1842 shutil.rmtree(tmpdir, True)
1843 1843
1844 1844 @command(b'perfrevlogchunks', revlogopts + formatteropts +
1845 1845 [(b'e', b'engines', b'', b'compression engines to use'),
1846 1846 (b's', b'startrev', 0, b'revision to start at')],
1847 1847 b'-c|-m|FILE')
1848 1848 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
1849 1849 """Benchmark operations on revlog chunks.
1850 1850
1851 1851 Logically, each revlog is a collection of fulltext revisions. However,
1852 1852 stored within each revlog are "chunks" of possibly compressed data. This
1853 1853 data needs to be read and decompressed or compressed and written.
1854 1854
1855 1855 This command measures the time it takes to read+decompress and recompress
1856 1856 chunks in a revlog. It effectively isolates I/O and compression performance.
1857 1857 For measurements of higher-level operations like resolving revisions,
1858 1858 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
1859 1859 """
1860 1860 opts = _byteskwargs(opts)
1861 1861
1862 1862 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
1863 1863
1864 1864 # _chunkraw was renamed to _getsegmentforrevs.
1865 1865 try:
1866 1866 segmentforrevs = rl._getsegmentforrevs
1867 1867 except AttributeError:
1868 1868 segmentforrevs = rl._chunkraw
1869 1869
1870 1870 # Verify engines argument.
1871 1871 if engines:
1872 1872 engines = set(e.strip() for e in engines.split(b','))
1873 1873 for engine in engines:
1874 1874 try:
1875 1875 util.compengines[engine]
1876 1876 except KeyError:
1877 1877 raise error.Abort(b'unknown compression engine: %s' % engine)
1878 1878 else:
1879 1879 engines = []
1880 1880 for e in util.compengines:
1881 1881 engine = util.compengines[e]
1882 1882 try:
1883 1883 if engine.available():
1884 1884 engine.revlogcompressor().compress(b'dummy')
1885 1885 engines.append(e)
1886 1886 except NotImplementedError:
1887 1887 pass
1888 1888
1889 1889 revs = list(rl.revs(startrev, len(rl) - 1))
1890 1890
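# inline revlogs store their data interleaved in the index (.i) file, so read from it; otherwise read the separate .d data file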
1891 1891 def rlfh(rl):
1892 1892 if rl._inline:
1893 1893 return getsvfs(repo)(rl.indexfile)
1894 1894 else:
1895 1895 return getsvfs(repo)(rl.datafile)
1896 1896
1897 1897 def doread():
1898 1898 rl.clearcaches()
1899 1899 for rev in revs:
1900 1900 segmentforrevs(rev, rev)
1901 1901
1902 1902 def doreadcachedfh():
1903 1903 rl.clearcaches()
1904 1904 fh = rlfh(rl)
1905 1905 for rev in revs:
1906 1906 segmentforrevs(rev, rev, df=fh)
1907 1907
1908 1908 def doreadbatch():
1909 1909 rl.clearcaches()
1910 1910 segmentforrevs(revs[0], revs[-1])
1911 1911
1912 1912 def doreadbatchcachedfh():
1913 1913 rl.clearcaches()
1914 1914 fh = rlfh(rl)
1915 1915 segmentforrevs(revs[0], revs[-1], df=fh)
1916 1916
1917 1917 def dochunk():
1918 1918 rl.clearcaches()
1919 1919 fh = rlfh(rl)
1920 1920 for rev in revs:
1921 1921 rl._chunk(rev, df=fh)
1922 1922
1923 1923 chunks = [None]
1924 1924
1925 1925 def dochunkbatch():
1926 1926 rl.clearcaches()
1927 1927 fh = rlfh(rl)
1928 1928 # Save chunks as a side-effect.
1929 1929 chunks[0] = rl._chunks(revs, df=fh)
1930 1930
1931 1931 def docompress(compressor):
1932 1932 rl.clearcaches()
1933 1933
1934 1934 try:
1935 1935 # Swap in the requested compression engine.
1936 1936 oldcompressor = rl._compressor
1937 1937 rl._compressor = compressor
1938 1938 for chunk in chunks[0]:
1939 1939 rl.compress(chunk)
1940 1940 finally:
1941 1941 rl._compressor = oldcompressor
1942 1942
1943 1943 benches = [
1944 1944 (lambda: doread(), b'read'),
1945 1945 (lambda: doreadcachedfh(), b'read w/ reused fd'),
1946 1946 (lambda: doreadbatch(), b'read batch'),
1947 1947 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
1948 1948 (lambda: dochunk(), b'chunk'),
1949 1949 (lambda: dochunkbatch(), b'chunk batch'),
1950 1950 ]
1951 1951
1952 1952 for engine in sorted(engines):
1953 1953 compressor = util.compengines[engine].revlogcompressor()
1954 1954 benches.append((functools.partial(docompress, compressor),
1955 1955 b'compress w/ %s' % engine))
1956 1956
1957 1957 for fn, title in benches:
1958 1958 timer, fm = gettimer(ui, opts)
1959 1959 timer(fn, title=title)
1960 1960 fm.end()
1961 1961
1962 1962 @command(b'perfrevlogrevision', revlogopts + formatteropts +
1963 1963 [(b'', b'cache', False, b'use caches instead of clearing')],
1964 1964 b'-c|-m|FILE REV')
1965 1965 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
1966 1966 """Benchmark obtaining a revlog revision.
1967 1967
1968 1968 Obtaining a revlog revision consists of roughly the following steps:
1969 1969
1970 1970 1. Compute the delta chain
1971 1971 2. Slice the delta chain if applicable
1972 1972 3. Obtain the raw chunks for that delta chain
1973 1973 4. Decompress each raw chunk
1974 1974 5. Apply binary patches to obtain fulltext
1975 1975 6. Verify hash of fulltext
1976 1976
1977 1977 This command measures the time spent in each of these phases.
1978 1978 """
1979 1979 opts = _byteskwargs(opts)
1980 1980
1981 1981 if opts.get(b'changelog') or opts.get(b'manifest'):
1982 1982 file_, rev = None, file_
1983 1983 elif rev is None:
1984 1984 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
1985 1985
1986 1986 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
1987 1987
1988 1988 # _chunkraw was renamed to _getsegmentforrevs.
1989 1989 try:
1990 1990 segmentforrevs = r._getsegmentforrevs
1991 1991 except AttributeError:
1992 1992 segmentforrevs = r._chunkraw
1993 1993
1994 1994 node = r.lookup(rev)
1995 1995 rev = r.rev(node)
1996 1996
1997 1997 def getrawchunks(data, chain):
1998 1998 start = r.start
1999 1999 length = r.length
2000 2000 inline = r._inline
2001 2001 iosize = r._io.size
2002 2002 buffer = util.buffer
2003 2003
2004 2004 chunks = []
2005 2005 ladd = chunks.append
2006 2006 for idx, item in enumerate(chain):
2007 2007 offset = start(item[0])
2008 2008 bits = data[idx]
2009 2009 for rev in item:
2010 2010 chunkstart = start(rev)
2011 2011 if inline:
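# inline revlogs interleave an index entry before each chunk, so skip (rev + 1) entries to reach the data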
2012 2012 chunkstart += (rev + 1) * iosize
2013 2013 chunklength = length(rev)
2014 2014 ladd(buffer(bits, chunkstart - offset, chunklength))
2015 2015
2016 2016 return chunks
2017 2017
2018 2018 def dodeltachain(rev):
2019 2019 if not cache:
2020 2020 r.clearcaches()
2021 2021 r._deltachain(rev)
2022 2022
2023 2023 def doread(chain):
2024 2024 if not cache:
2025 2025 r.clearcaches()
2026 2026 for item in slicedchain:
2027 2027 segmentforrevs(item[0], item[-1])
2028 2028
2029 2029 def doslice(r, chain, size):
2030 2030 for s in slicechunk(r, chain, targetsize=size):
2031 2031 pass
2032 2032
2033 2033 def dorawchunks(data, chain):
2034 2034 if not cache:
2035 2035 r.clearcaches()
2036 2036 getrawchunks(data, chain)
2037 2037
2038 2038 def dodecompress(chunks):
2039 2039 decomp = r.decompress
2040 2040 for chunk in chunks:
2041 2041 decomp(chunk)
2042 2042
2043 2043 def dopatch(text, bins):
2044 2044 if not cache:
2045 2045 r.clearcaches()
2046 2046 mdiff.patches(text, bins)
2047 2047
2048 2048 def dohash(text):
2049 2049 if not cache:
2050 2050 r.clearcaches()
2051 2051 r.checkhash(text, node, rev=rev)
2052 2052
2053 2053 def dorevision():
2054 2054 if not cache:
2055 2055 r.clearcaches()
2056 2056 r.revision(node)
2057 2057
2058 2058 try:
2059 2059 from mercurial.revlogutils.deltas import slicechunk
2060 2060 except ImportError:
2061 2061 slicechunk = getattr(revlog, '_slicechunk', None)
2062 2062
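# precompute the delta chain, raw segments, chunks, and fulltext once, so each benchmark below exercises only its own phase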
2063 2063 size = r.length(rev)
2064 2064 chain = r._deltachain(rev)[0]
2065 2065 if not getattr(r, '_withsparseread', False):
2066 2066 slicedchain = (chain,)
2067 2067 else:
2068 2068 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2069 2069 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2070 2070 rawchunks = getrawchunks(data, slicedchain)
2071 2071 bins = r._chunks(chain)
2072 2072 text = bytes(bins[0])
2073 2073 bins = bins[1:]
2074 2074 text = mdiff.patches(text, bins)
2075 2075
2076 2076 benches = [
2077 2077 (lambda: dorevision(), b'full'),
2078 2078 (lambda: dodeltachain(rev), b'deltachain'),
2079 2079 (lambda: doread(chain), b'read'),
2080 2080 ]
2081 2081
2082 2082 if getattr(r, '_withsparseread', False):
2083 2083 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2084 2084 benches.append(slicing)
2085 2085
2086 2086 benches.extend([
2087 2087 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2088 2088 (lambda: dodecompress(rawchunks), b'decompress'),
2089 2089 (lambda: dopatch(text, bins), b'patch'),
2090 2090 (lambda: dohash(text), b'hash'),
2091 2091 ])
2092 2092
2093 2093 timer, fm = gettimer(ui, opts)
2094 2094 for fn, title in benches:
2095 2095 timer(fn, title=title)
2096 2096 fm.end()
2097 2097
2098 2098 @command(b'perfrevset',
2099 2099 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2100 2100 (b'', b'contexts', False, b'obtain changectx for each revision')]
2101 2101 + formatteropts, b"REVSET")
2102 2102 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2103 2103 """benchmark the execution time of a revset
2104 2104
2105 2105 Use the --clear option if you need to evaluate the impact of building the
2106 2106 volatile revision set caches on revset execution. Volatile caches hold
2107 2107 filtered and obsolescence-related data."""
2108 2108 opts = _byteskwargs(opts)
2109 2109
2110 2110 timer, fm = gettimer(ui, opts)
2111 2111 def d():
2112 2112 if clear:
2113 2113 repo.invalidatevolatilesets()
2114 2114 if contexts:
2115 2115 for ctx in repo.set(expr): pass
2116 2116 else:
2117 2117 for r in repo.revs(expr): pass
2118 2118 timer(d)
2119 2119 fm.end()
2120 2120
2121 2121 @command(b'perfvolatilesets',
2122 2122 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2123 2123 ] + formatteropts)
2124 2124 def perfvolatilesets(ui, repo, *names, **opts):
2125 2125 """benchmark the computation of various volatile set
2126 2126
2127 2127 Volatile sets compute elements related to filtering and obsolescence."""
2128 2128 opts = _byteskwargs(opts)
2129 2129 timer, fm = gettimer(ui, opts)
2130 2130 repo = repo.unfiltered()
2131 2131
2132 2132 def getobs(name):
2133 2133 def d():
2134 2134 repo.invalidatevolatilesets()
2135 2135 if opts[b'clear_obsstore']:
2136 2136 clearfilecache(repo, b'obsstore')
2137 2137 obsolete.getrevs(repo, name)
2138 2138 return d
2139 2139
2140 2140 allobs = sorted(obsolete.cachefuncs)
2141 2141 if names:
2142 2142 allobs = [n for n in allobs if n in names]
2143 2143
2144 2144 for name in allobs:
2145 2145 timer(getobs(name), title=name)
2146 2146
2147 2147 def getfiltered(name):
2148 2148 def d():
2149 2149 repo.invalidatevolatilesets()
2150 2150 if opts[b'clear_obsstore']:
2151 2151 clearfilecache(repo, b'obsstore')
2152 2152 repoview.filterrevs(repo, name)
2153 2153 return d
2154 2154
2155 2155 allfilter = sorted(repoview.filtertable)
2156 2156 if names:
2157 2157 allfilter = [n for n in allfilter if n in names]
2158 2158
2159 2159 for name in allfilter:
2160 2160 timer(getfiltered(name), title=name)
2161 2161 fm.end()
2162 2162
2163 2163 @command(b'perfbranchmap',
2164 2164 [(b'f', b'full', False,
2165 2165 b'Includes build time of subset'),
2166 2166 (b'', b'clear-revbranch', False,
2167 2167 b'purge the revbranch cache between computation'),
2168 2168 ] + formatteropts)
2169 2169 def perfbranchmap(ui, repo, *filternames, **opts):
2170 2170 """benchmark the update of a branchmap
2171 2171
2172 2172 This benchmarks the full repo.branchmap() call with read and write disabled
2173 2173 """
2174 2174 opts = _byteskwargs(opts)
2175 2175 full = opts.get(b"full", False)
2176 2176 clear_revbranch = opts.get(b"clear_revbranch", False)
2177 2177 timer, fm = gettimer(ui, opts)
2178 2178 def getbranchmap(filtername):
2179 2179 """generate a benchmark function for the filtername"""
2180 2180 if filtername is None:
2181 2181 view = repo
2182 2182 else:
2183 2183 view = repo.filtered(filtername)
2184 2184 def d():
2185 2185 if clear_revbranch:
2186 2186 repo.revbranchcache()._clear()
2187 2187 if full:
2188 2188 view._branchcaches.clear()
2189 2189 else:
2190 2190 view._branchcaches.pop(filtername, None)
2191 2191 view.branchmap()
2192 2192 return d
2193 2193 # order filters from smaller subsets to bigger ones
2194 2194 possiblefilters = set(repoview.filtertable)
2195 2195 if filternames:
2196 2196 possiblefilters &= set(filternames)
2197 2197 subsettable = getbranchmapsubsettable()
2198 2198 allfilters = []
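# repeatedly pick a filter whose subset is no longer pending, so every filter ends up after the filter it builds on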
2199 2199 while possiblefilters:
2200 2200 for name in possiblefilters:
2201 2201 subset = subsettable.get(name)
2202 2202 if subset not in possiblefilters:
2203 2203 break
2204 2204 else:
2205 2205 assert False, b'subset cycle %s!' % possiblefilters
2206 2206 allfilters.append(name)
2207 2207 possiblefilters.remove(name)
2208 2208
2209 2209 # warm the cache
2210 2210 if not full:
2211 2211 for name in allfilters:
2212 2212 repo.filtered(name).branchmap()
2213 2213 if not filternames or b'unfiltered' in filternames:
2214 2214 # add unfiltered
2215 2215 allfilters.append(None)
2216 2216
2217 2217 branchcacheread = safeattrsetter(branchmap, b'read')
2218 2218 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2219 2219 branchcacheread.set(lambda repo: None)
2220 2220 branchcachewrite.set(lambda bc, repo: None)
2221 2221 try:
2222 2222 for name in allfilters:
2223 2223 printname = name
2224 2224 if name is None:
2225 2225 printname = b'unfiltered'
2226 2226 timer(getbranchmap(name), title=str(printname))
2227 2227 finally:
2228 2228 branchcacheread.restore()
2229 2229 branchcachewrite.restore()
2230 2230 fm.end()
2231 2231
2232 2232 @command(b'perfbranchmapload', [
2233 2233 (b'f', b'filter', b'', b'Specify repoview filter'),
2234 2234 (b'', b'list', False, b'List branchmap filter caches'),
2235 2235 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2236 2236
2237 2237 ] + formatteropts)
2238 2238 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2239 2239 """benchmark reading the branchmap"""
2240 2240 opts = _byteskwargs(opts)
2241 2241 clearrevlogs = opts[b'clear_revlogs']
2242 2242
2243 2243 if list:
2244 2244 for name, kind, st in repo.cachevfs.readdir(stat=True):
2245 2245 if name.startswith(b'branch2'):
2246 2246 filtername = name.partition(b'-')[2] or b'unfiltered'
2247 2247 ui.status(b'%s - %s\n'
2248 2248 % (filtername, util.bytecount(st.st_size)))
2249 2249 return
2250 2250 if filter:
2251 2251 repo = repoview.repoview(repo, filter)
2252 2252 else:
2253 2253 repo = repo.unfiltered()
2254 2254 # try once without timer, the filter may not be cached
2255 2255 if branchmap.read(repo) is None:
2256 2256 raise error.Abort(b'No branchmap cached for %s repo'
2257 2257 % (filter or b'unfiltered'))
2258 2258 timer, fm = gettimer(ui, opts)
2259 2259 def setup():
2260 2260 if clearrevlogs:
2261 2261 clearchangelog(repo)
2262 2262 def bench():
2263 2263 branchmap.read(repo)
2264 2264 timer(bench, setup=setup)
2265 2265 fm.end()
2266 2266
2267 2267 @command(b'perfloadmarkers')
2268 2268 def perfloadmarkers(ui, repo):
2269 2269 """benchmark the time to parse the on-disk markers for a repo
2270 2270
2271 2271 Result is the number of markers in the repo."""
2272 2272 timer, fm = gettimer(ui)
2273 2273 svfs = getsvfs(repo)
2274 2274 timer(lambda: len(obsolete.obsstore(svfs)))
2275 2275 fm.end()
2276 2276
2277 2277 @command(b'perflrucachedict', formatteropts +
2278 2278 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2279 2279 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2280 2280 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2281 2281 (b'', b'size', 4, b'size of cache'),
2282 2282 (b'', b'gets', 10000, b'number of key lookups'),
2283 2283 (b'', b'sets', 10000, b'number of key sets'),
2284 2284 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2285 2285 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2286 2286 norepo=True)
2287 2287 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2288 2288 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2289 2289 opts = _byteskwargs(opts)
2290 2290
2291 2291 def doinit():
2292 2292 for i in _xrange(10000):
2293 2293 util.lrucachedict(size)
2294 2294
2295 2295 costrange = list(range(mincost, maxcost + 1))
2296 2296
2297 2297 values = []
2298 2298 for i in _xrange(size):
2299 2299 values.append(random.randint(0, _maxint))
2300 2300
2301 2301 # Get mode fills the cache and tests raw lookup performance with no
2302 2302 # eviction.
2303 2303 getseq = []
2304 2304 for i in _xrange(gets):
2305 2305 getseq.append(random.choice(values))
2306 2306
2307 2307 def dogets():
2308 2308 d = util.lrucachedict(size)
2309 2309 for v in values:
2310 2310 d[v] = v
2311 2311 for key in getseq:
2312 2312 value = d[key]
2313 2313 value # silence pyflakes warning
2314 2314
2315 2315 def dogetscost():
2316 2316 d = util.lrucachedict(size, maxcost=costlimit)
2317 2317 for i, v in enumerate(values):
2318 2318 d.insert(v, v, cost=costs[i])
2319 2319 for key in getseq:
2320 2320 try:
2321 2321 value = d[key]
2322 2322 value # silence pyflakes warning
2323 2323 except KeyError:
2324 2324 pass
2325 2325
2326 2326 # Set mode tests insertion speed with cache eviction.
2327 2327 setseq = []
2328 2328 costs = []
2329 2329 for i in _xrange(sets):
2330 2330 setseq.append(random.randint(0, _maxint))
2331 2331 costs.append(random.choice(costrange))
2332 2332
2333 2333 def doinserts():
2334 2334 d = util.lrucachedict(size)
2335 2335 for v in setseq:
2336 2336 d.insert(v, v)
2337 2337
2338 2338 def doinsertscost():
2339 2339 d = util.lrucachedict(size, maxcost=costlimit)
2340 2340 for i, v in enumerate(setseq):
2341 2341 d.insert(v, v, cost=costs[i])
2342 2342
2343 2343 def dosets():
2344 2344 d = util.lrucachedict(size)
2345 2345 for v in setseq:
2346 2346 d[v] = v
2347 2347
2348 2348 # Mixed mode randomly performs gets and sets with eviction.
2349 2349 mixedops = []
2350 2350 for i in _xrange(mixed):
2351 2351 r = random.randint(0, 100)
2352 2352 if r < mixedgetfreq:
2353 2353 op = 0
2354 2354 else:
2355 2355 op = 1
2356 2356
2357 2357 mixedops.append((op,
2358 2358 random.randint(0, size * 2),
2359 2359 random.choice(costrange)))
2360 2360
2361 2361 def domixed():
2362 2362 d = util.lrucachedict(size)
2363 2363
2364 2364 for op, v, cost in mixedops:
2365 2365 if op == 0:
2366 2366 try:
2367 2367 d[v]
2368 2368 except KeyError:
2369 2369 pass
2370 2370 else:
2371 2371 d[v] = v
2372 2372
2373 2373 def domixedcost():
2374 2374 d = util.lrucachedict(size, maxcost=costlimit)
2375 2375
2376 2376 for op, v, cost in mixedops:
2377 2377 if op == 0:
2378 2378 try:
2379 2379 d[v]
2380 2380 except KeyError:
2381 2381 pass
2382 2382 else:
2383 2383 d.insert(v, v, cost=cost)
2384 2384
2385 2385 benches = [
2386 2386 (doinit, b'init'),
2387 2387 ]
2388 2388
2389 2389 if costlimit:
2390 2390 benches.extend([
2391 2391 (dogetscost, b'gets w/ cost limit'),
2392 2392 (doinsertscost, b'inserts w/ cost limit'),
2393 2393 (domixedcost, b'mixed w/ cost limit'),
2394 2394 ])
2395 2395 else:
2396 2396 benches.extend([
2397 2397 (dogets, b'gets'),
2398 2398 (doinserts, b'inserts'),
2399 2399 (dosets, b'sets'),
2400 2400 (domixed, b'mixed')
2401 2401 ])
2402 2402
2403 2403 for fn, title in benches:
2404 2404 timer, fm = gettimer(ui, opts)
2405 2405 timer(fn, title=title)
2406 2406 fm.end()
2407 2407
2408 2408 @command(b'perfwrite', formatteropts)
2409 2409 def perfwrite(ui, repo, **opts):
2410 2410 """microbenchmark ui.write
2411 2411 """
2412 2412 opts = _byteskwargs(opts)
2413 2413
2414 2414 timer, fm = gettimer(ui, opts)
2415 2415 def write():
2416 2416 for i in range(100000):
2417 2417 ui.write((b'Testing write performance\n'))
2418 2418 timer(write)
2419 2419 fm.end()
2420 2420
2421 2421 def uisetup(ui):
2422 2422 if (util.safehasattr(cmdutil, b'openrevlog') and
2423 2423 not util.safehasattr(commands, b'debugrevlogopts')):
2424 2424 # for "historical portability":
2425 2425 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2426 2426 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2427 2427 # openrevlog() should cause failure, because it has been
2428 2428 # available since 3.5 (or 49c583ca48c4).
2429 2429 def openrevlog(orig, repo, cmd, file_, opts):
2430 2430 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2431 2431 raise error.Abort(b"This version doesn't support --dir option",
2432 2432 hint=b"use 3.5 or later")
2433 2433 return orig(repo, cmd, file_, opts)
2434 2434 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
@@ -1,285 +1,286 b''
1 1 #require test-repo
2 2
3 3 Set vars:
4 4
5 5 $ . "$TESTDIR/helpers-testrepo.sh"
6 6 $ CONTRIBDIR="$TESTDIR/../contrib"
7 7
8 8 Prepare repo:
9 9
10 10 $ hg init
11 11
12 12 $ echo this is file a > a
13 13 $ hg add a
14 14 $ hg commit -m first
15 15
16 16 $ echo adding to file a >> a
17 17 $ hg commit -m second
18 18
19 19 $ echo adding more to file a >> a
20 20 $ hg commit -m third
21 21
22 22 $ hg up -r 0
23 23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 24 $ echo merge-this >> a
25 25 $ hg commit -m merge-able
26 26 created new head
27 27
28 28 $ hg up -r 2
29 29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30 30
31 31 perfstatus
32 32
33 33 $ cat >> $HGRCPATH << EOF
34 34 > [extensions]
35 35 > perfstatusext=$CONTRIBDIR/perf.py
36 36 > [perf]
37 37 > presleep=0
38 38 > stub=on
39 39 > parentscount=1
40 40 > EOF
41 41 $ hg help perfstatusext
42 42 perfstatusext extension - helper extension to measure performance
43 43
44 44 list of commands:
45 45
46 46 perfaddremove
47 47 (no help text available)
48 48 perfancestors
49 49 (no help text available)
50 50 perfancestorset
51 51 (no help text available)
52 52 perfannotate (no help text available)
53 53 perfbdiff benchmark a bdiff between revisions
54 54 perfbookmarks
55 55 benchmark parsing bookmarks from disk to memory
56 56 perfbranchmap
57 57 benchmark the update of a branchmap
58 58 perfbranchmapload
59 59 benchmark reading the branchmap
60 60 perfbundleread
61 61 Benchmark reading of bundle files.
62 62 perfcca (no help text available)
63 63 perfchangegroupchangelog
64 64 Benchmark producing a changelog group for a changegroup.
65 65 perfchangeset
66 66 (no help text available)
67 67 perfctxfiles (no help text available)
68 68 perfdiffwd Profile diff of working directory changes
69 69 perfdirfoldmap
70 70 (no help text available)
71 71 perfdirs (no help text available)
72 72 perfdirstate (no help text available)
73 73 perfdirstatedirs
74 74 (no help text available)
75 75 perfdirstatefoldmap
76 76 (no help text available)
77 77 perfdirstatewrite
78 78 (no help text available)
79 79 perffncacheencode
80 80 (no help text available)
81 81 perffncacheload
82 82 (no help text available)
83 83 perffncachewrite
84 84 (no help text available)
85 85 perfheads (no help text available)
86 86 perfhelper-tracecopies
87 87 find statistic about potential parameters for the
88 88 'perftracecopies'
89 89 perfindex (no help text available)
90 90 perflinelogedits
91 91 (no help text available)
92 92 perfloadmarkers
93 93 benchmark the time to parse the on-disk markers for a repo
94 94 perflog (no help text available)
95 95 perflookup (no help text available)
96 96 perflrucachedict
97 97 (no help text available)
98 98 perfmanifest benchmark the time to read a manifest from disk and return a
99 99 usable
100 100 perfmergecalculate
101 101 (no help text available)
102 102 perfmoonwalk benchmark walking the changelog backwards
103 103 perfnodelookup
104 104 (no help text available)
105 105 perfparents (no help text available)
106 106 perfpathcopies
107 107 (no help text available)
108 108 perfphases benchmark phasesets computation
109 109 perfphasesremote
110 110 benchmark time needed to analyse phases of the remote server
111 111 perfrawfiles (no help text available)
112 112 perfrevlogchunks
113 113 Benchmark operations on revlog chunks.
114 114 perfrevlogindex
115 115 Benchmark operations against a revlog index.
116 116 perfrevlogrevision
117 117 Benchmark obtaining a revlog revision.
118 118 perfrevlogrevisions
119 119 Benchmark reading a series of revisions from a revlog.
120 120 perfrevlogwrite
121 121 Benchmark writing a series of revisions to a revlog.
122 122 perfrevrange (no help text available)
123 123 perfrevset benchmark the execution time of a revset
124 124 perfstartup (no help text available)
125 125 perfstatus (no help text available)
126 126 perftags (no help text available)
127 127 perftemplating
128 128 test the rendering time of a given template
129 129 perfunidiff benchmark a unified diff between revisions
130 130 perfvolatilesets
131 131 benchmark the computation of various volatile sets
132 132 perfwalk (no help text available)
133 133 perfwrite microbenchmark ui.write
134 134
135 135 (use 'hg help -v perfstatusext' to show built-in aliases and global options)
136 136 $ hg perfaddremove
137 137 $ hg perfancestors
138 138 $ hg perfancestorset 2
139 139 $ hg perfannotate a
140 140 $ hg perfbdiff -c 1
141 141 $ hg perfbdiff --alldata 1
142 142 $ hg perfunidiff -c 1
143 143 $ hg perfunidiff --alldata 1
144 144 $ hg perfbookmarks
145 145 $ hg perfbranchmap
146 146 $ hg perfcca
147 147 $ hg perfchangegroupchangelog
148 $ hg perfchangegroupchangelog --cgversion 01
148 149 $ hg perfchangeset 2
149 150 $ hg perfctxfiles 2
150 151 $ hg perfdiffwd
151 152 $ hg perfdirfoldmap
152 153 $ hg perfdirs
153 154 $ hg perfdirstate
154 155 $ hg perfdirstatedirs
155 156 $ hg perfdirstatefoldmap
156 157 $ hg perfdirstatewrite
157 158 #if repofncache
158 159 $ hg perffncacheencode
159 160 $ hg perffncacheload
160 161 $ hg debugrebuildfncache
161 162 fncache already up to date
162 163 $ hg perffncachewrite
163 164 $ hg debugrebuildfncache
164 165 fncache already up to date
165 166 #endif
166 167 $ hg perfheads
167 168 $ hg perfindex
168 169 $ hg perflinelogedits -n 1
169 170 $ hg perfloadmarkers
170 171 $ hg perflog
171 172 $ hg perflookup 2
172 173 $ hg perflrucache
173 174 $ hg perfmanifest 2
174 175 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
175 176 $ hg perfmanifest -m 44fe2c8352bb
176 177 abort: manifest revision must be integer or full node
177 178 [255]
178 179 $ hg perfmergecalculate -r 3
179 180 $ hg perfmoonwalk
180 181 $ hg perfnodelookup 2
181 182 $ hg perfpathcopies 1 2
182 183 $ hg perfrawfiles 2
183 184 $ hg perfrevlogindex -c
184 185 #if reporevlogstore
185 186 $ hg perfrevlogrevisions .hg/store/data/a.i
186 187 #endif
187 188 $ hg perfrevlogrevision -m 0
188 189 $ hg perfrevlogchunks -c
189 190 $ hg perfrevrange
190 191 $ hg perfrevset 'all()'
191 192 $ hg perfstartup
192 193 $ hg perfstatus
193 194 $ hg perftags
194 195 $ hg perftemplating
195 196 $ hg perfvolatilesets
196 197 $ hg perfwalk
197 198 $ hg perfparents
198 199
199 200 test actual output
200 201 ------------------
201 202
202 203 normal output:
203 204
204 205 $ hg perfheads --config perf.stub=no
205 206 ! wall * comb * user * sys * (best of *) (glob)
206 207
207 208 detailed output:
208 209
209 210 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
210 211 ! wall * comb * user * sys * (best of *) (glob)
211 212 ! wall * comb * user * sys * (max of *) (glob)
212 213 ! wall * comb * user * sys * (avg of *) (glob)
213 214 ! wall * comb * user * sys * (median of *) (glob)
214 215
215 216 test json output
216 217 ----------------
217 218
218 219 normal output:
219 220
220 221 $ hg perfheads --template json --config perf.stub=no
221 222 [
222 223 {
223 224 "comb": *, (glob)
224 225 "count": *, (glob)
225 226 "sys": *, (glob)
226 227 "user": *, (glob)
227 228 "wall": * (glob)
228 229 }
229 230 ]
230 231
231 232 detailed output:
232 233
233 234 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
234 235 [
235 236 {
236 237 "avg.comb": *, (glob)
237 238 "avg.count": *, (glob)
238 239 "avg.sys": *, (glob)
239 240 "avg.user": *, (glob)
240 241 "avg.wall": *, (glob)
241 242 "comb": *, (glob)
242 243 "count": *, (glob)
243 244 "max.comb": *, (glob)
244 245 "max.count": *, (glob)
245 246 "max.sys": *, (glob)
246 247 "max.user": *, (glob)
247 248 "max.wall": *, (glob)
248 249 "median.comb": *, (glob)
249 250 "median.count": *, (glob)
250 251 "median.sys": *, (glob)
251 252 "median.user": *, (glob)
252 253 "median.wall": *, (glob)
253 254 "sys": *, (glob)
254 255 "user": *, (glob)
255 256 "wall": * (glob)
256 257 }
257 258 ]
258 259
259 260 Check perf.py for historical portability
260 261 ----------------------------------------
261 262
262 263 $ cd "$TESTDIR/.."
263 264
264 265 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
265 266 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
266 267 > "$TESTDIR"/check-perf-code.py contrib/perf.py
267 268 contrib/perf.py:\d+: (re)
268 269 > from mercurial import (
269 270 import newer module separately in try clause for early Mercurial
270 271 contrib/perf.py:\d+: (re)
271 272 > from mercurial import (
272 273 import newer module separately in try clause for early Mercurial
273 274 contrib/perf.py:\d+: (re)
274 275 > origindexpath = orig.opener.join(orig.indexfile)
275 276 use getvfs()/getsvfs() for early Mercurial
276 277 contrib/perf.py:\d+: (re)
277 278 > origdatapath = orig.opener.join(orig.datafile)
278 279 use getvfs()/getsvfs() for early Mercurial
279 280 contrib/perf.py:\d+: (re)
280 281 > vfs = vfsmod.vfs(tmpdir)
281 282 use getvfs()/getsvfs() for early Mercurial
282 283 contrib/perf.py:\d+: (re)
283 284 > vfs.options = getattr(orig.opener, 'options', None)
284 285 use getvfs()/getsvfs() for early Mercurial
285 286 [1]