##// END OF EJS Templates
perfdiscovery: benching findcommonheads()...
Georges Racinet -
r40977:db6cace1 default
parent child Browse files
Show More
@@ -1,2632 +1,2653 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance'''
3 3
4 4 # "historical portability" policy of perf.py:
5 5 #
6 6 # We have to do:
7 7 # - make perf.py "loadable" with as wide Mercurial version as possible
8 8 # This doesn't mean that perf commands work correctly with that Mercurial.
9 9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
10 10 # - make historical perf command work correctly with as wide Mercurial
11 11 # version as possible
12 12 #
13 13 # We have to do, if possible with reasonable cost:
14 14 # - make recent perf command for historical feature work correctly
15 15 # with early Mercurial
16 16 #
17 17 # We don't have to do:
18 18 # - make perf command for recent feature work correctly with early
19 19 # Mercurial
20 20
21 21 from __future__ import absolute_import
22 22 import contextlib
23 23 import functools
24 24 import gc
25 25 import os
26 26 import random
27 27 import shutil
28 28 import struct
29 29 import sys
30 30 import tempfile
31 31 import threading
32 32 import time
33 33 from mercurial import (
34 34 changegroup,
35 35 cmdutil,
36 36 commands,
37 37 copies,
38 38 error,
39 39 extensions,
40 hg,
40 41 mdiff,
41 42 merge,
42 43 revlog,
43 44 util,
44 45 )
45 46
46 47 # for "historical portability":
47 48 # try to import modules separately (in dict order), and ignore
48 49 # failure, because these aren't available with early Mercurial
49 50 try:
50 51 from mercurial import branchmap # since 2.5 (or bcee63733aad)
51 52 except ImportError:
52 53 pass
53 54 try:
54 55 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
55 56 except ImportError:
56 57 pass
57 58 try:
58 59 from mercurial import registrar # since 3.7 (or 37d50250b696)
59 60 dir(registrar) # forcibly load it
60 61 except ImportError:
61 62 registrar = None
62 63 try:
63 64 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
64 65 except ImportError:
65 66 pass
66 67 try:
67 68 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
68 69 except ImportError:
69 70 pass
71 try:
72 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
73 except ImportError:
74 pass
75
70 76
def identity(a):
    """Return *a* unchanged; no-op stand-in for missing pycompat helpers."""
    return a
73 79
# for "historical portability":
# prefer the pycompat helpers when present, and fall back to
# Python-2-only equivalents when either the module or the individual
# attribute is missing (older Mercurial on py2).
try:
    from mercurial import pycompat
    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (ImportError, AttributeError):
    # pre-pycompat Mercurial: necessarily Python 2, so the py2-only
    # builtins (xrange, sys.maxint) are safe to reference here
    import inspect
    getargspec = inspect.getargspec
    _byteskwargs = identity
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange

# pick a Queue class across the 4.7 pycompat.queue reorganization
try:
    # 4.7+
    queue = pycompat.queue.Queue
except (AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (AttributeError, ImportError):
        queue = util.queue

# maketemplater moved from cmdutil to logcmdutil in 4.6; None when
# neither location exists (very old Mercurial)
try:
    from mercurial import logcmdutil
    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None
112 118
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()

def safehasattr(thing, attr):
    """hasattr() replacement that accepts bytes attribute names."""
    sentinel = _undefined
    return getattr(thing, _sysstr(attr), sentinel) is not sentinel

setattr(util, 'safehasattr', safehasattr)
120 126
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == 'nt':
    # os.name is a native str on both Python 2 and 3; the previous
    # comparison against b'nt' could never match on Python 3, silently
    # selecting the low-resolution time.time branch on Windows.
    util.timer = time.clock
else:
    util.timer = time.time
130 136
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
# NOTE(review): the bare parentheses around the help strings look like
# leftovers of removed _() gettext markers — harmless, but confirm.
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
    (b'c', b'changelog', False, (b'open changelog')),
    (b'm', b'manifest', False, (b'open manifest')),
    (b'', b'dir', False, (b'open directory manifest')),
    ]))
150 156
cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Return the aliases declared in a b'name|alias|...' command string."""
    return list(cmd.split(b"|"))
158 164
# pick the best available @command decorator: registrar (3.7+),
# cmdutil (1.9+, optionally wrapped to add "norepo"), or a local
# minimal reimplementation for anything older.
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            # emulate norepo by appending aliases to commands.norepo
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
186 192
# declare the extension's config knobs when the registrar supports it;
# silently skip on Mercurial too old to have registrar.configitem
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    # dynamicdefault: the effective defaults are supplied at the call
    # sites (getint/configbool) rather than here
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    pass
206 212
def getlen(ui):
    """Return len(), or a constant-1 stub when perf.stub is enabled."""
    if not ui.configbool(b"perf", b"stub", False):
        return len
    return lambda x: 1
211 217
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                # falsy so callers can test "if fm:" for a real formatter
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)
    return functools.partial(_timer, fm, displayall=displayall), fm
277 283
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* exactly once with no timing loop (perf.stub mode)."""
    if setup:
        setup()
    func()
282 288
@contextlib.contextmanager
def timeone():
    """Time the with-block; on exit append (wall, user, sys) to the
    yielded list."""
    result = []
    os_before = os.times()
    clock_before = util.timer()
    yield result
    clock_after = util.timer()
    os_after = os.times()
    result.append((clock_after - clock_before,
                   os_after[0] - os_before[0],
                   os_after[1] - os_before[1]))
293 299
def _timer(fm, func, setup=None, title=None, displayall=False):
    """Repeatedly run (setup?, func) and report timings through *fm*.

    Sampling stops after 3 seconds once at least 100 runs were taken,
    or after 10 seconds once at least 3 runs were taken.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    while True:
        if setup is not None:
            setup()
        with timeone() as item:
            r = func()
        count += 1
        results.append(item[0])
        cstop = util.timer()
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break

    # r is always bound: the loop body executes at least once before
    # either break can trigger
    formatone(fm, results, title=title, result=r,
              displayall=displayall)
314 320
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Emit one benchmark entry: the best sample, plus max/avg/median
    when *displayall* is set. Sorts *timings* in place."""
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def emit(role, entry):
        prefix = b'' if role == b'best' else b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    emit(b'best', timings[0])
    if displayall:
        emit(b'max', timings[-1])
        avg = tuple(sum(col) / count for col in zip(*timings))
        emit(b'avg', avg)
        emit(b'median', timings[len(timings) // 2])
346 352
347 353 # utilities for historical portability
348 354
def getint(ui, section, name, default):
    """Read an integer config value, returning *default* when unset.

    Raises error.ConfigError when the value is set but not an integer.
    """
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, raw))
360 366
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    Aborts when 'obj' lacks 'name', so a removed attribute can't
    silently break the assumptions of a benchmark. The returned helper
    can both (1) assign a new value and (2) restore the original value.

    With ignoremissing=True a missing attribute returns None instead of
    aborting, which is useful for attributes that only some Mercurial
    versions provide.
    """
    if not util.safehasattr(obj, name):
        if not ignoremissing:
            raise error.Abort(
                (b"missing attribute %s of %s might break assumption"
                 b" of performance measurement") % (name, obj))
        return None

    origvalue = getattr(obj, _sysstr(name))

    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
390 396
391 397 # utilities to examine each internal API changes
392 398
def getbranchmapsubsettable():
    """Locate the branchmap 'subsettable' across module reorganizations.

    subsettable is defined in branchmap since 2.9 (or 175c6fd8cacc) and
    in repoview since 2.5 (or 59a9f18d4587).
    """
    for mod in (branchmap, repoview):
        table = getattr(mod, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
408 414
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf);
    # older repos expose the same thing as 'sopener'
    return getattr(repo, 'svfs', None) or getattr(repo, 'sopener')
419 425
def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf);
    # older repos expose the same thing as 'opener'
    return getattr(repo, 'vfs', None) or getattr(repo, 'opener')
430 436
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API

    Probes four generations of the tags-cache API, newest first, and
    returns a zero-argument callable suited to the one found.
    """
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
459 465
460 466 # utilities to clear cache
461 467
def clearfilecache(obj, attrname):
    """Drop a @filecache'd property from *obj* (unfiltered view when
    available) so the next access recomputes it."""
    getunfiltered = getattr(obj, 'unfiltered', None)
    if getunfiltered is not None:
        obj = getunfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
469 475
def clearchangelog(repo):
    """Invalidate the changelog caches on *repo* (filtered and not)."""
    unfi = repo.unfiltered()
    if repo is not unfi:
        # bypass any __setattr__ guard on the filtered proxy
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(unfi, 'changelog')
475 481
476 482 # perf commands
477 483
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the working directory via the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})
    def walkall():
        return len(list(repo.dirstate.walk(matcher, subrepos=[],
                                           unknown=True, ignored=False)))
    timer(walkall)
    fm.end()
486 492
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file F at the working parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]
    timer(lambda: len(fctx.annotate(True)))
    fm.end()
494 500
@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    """benchmark computing the working-directory status"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    showunknown = opts[b'unknown']
    timer(lambda: sum(map(len, repo.status(unknown=showunknown))))
    fm.end()
506 512
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the working directory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Capture the old value BEFORE entering the try block: if the
    # assignment lived inside the try and raised, the finally clause
    # would reference an unbound name and mask the real error.
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
520 526
def clearcaches(cl):
    """Reset a changelog's lookup caches, coping with internal API drift."""
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
        return
    if util.safehasattr(cl, b'_nodecache'):
        # pre-clearcaches() revlogs: rebuild the node cache by hand
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
529 535
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark computing the changelog head revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def runone():
        len(cl.headrevs())
        clearcaches(cl)
    timer(runone)
    fm.end()
540 546
@command(b'perftags', formatteropts +
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perftags(ui, repo, **opts):
    """benchmark reading and computing repository tags"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()
    timer(lambda: len(repo.tags()), setup=setup)
    fm.end()
559 565
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating the ancestors of all changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def walk():
        for _rev in repo.changelog.ancestors(heads):
            pass
    timer(walk)
    fm.end()
570 576
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests of REVSET against a lazy ancestor set"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def check():
        ancestors = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in ancestors
    timer(check)
    fm.end()
583 589
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # normalize **opts keys to bytes, like every other perf command:
    # on Python 3 the keys arrive as str, while gettimer/formatter and
    # hg.peer are handed this dict expecting bytes keys
    opts = _byteskwargs(opts)
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # a fresh peer per run so connection setup isn't amortized away
        repos[1] = hg.peer(ui, opts, path)
    def d():
        setdiscovery.findcommonheads(ui, *repos)
    timer(d, setup=s)
    fm.end()
604
@command(b'perfbookmarks', formatteropts +
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')
    def load():
        repo._bookmarks
    timer(load, setup=setup)
    fm.end()
602 623
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # each make* helper returns a zero-argument closure that re-opens
    # the bundle file from scratch, so every timed run includes the
    # open/readbundle cost
    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # raw file reads, bypassing bundle parsing entirely
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # open once just to detect the bundle format and pick the
    # format-specific benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
720 741
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def produce():
        _state, chunks = bundler._generatechangelog(cl, nodes)
        for _chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(produce)

    fm.end()
751 772
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark rebuilding the dirstate directory map via hasdir()"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # prime the dirstate before timing
    def rebuild():
        dirstate.hasdir(b'a')
        del dirstate._map._dirs
    timer(rebuild)
    fm.end()
763 784
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark loading the dirstate from scratch"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # prime once outside the timed loop
    def reload():
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(reload)
    fm.end()
774 795
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark rebuilding the dirstate directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate
    def rebuild():
        repo.dirstate.hasdir(b"a")
        del repo.dirstate._map._dirs
    timer(rebuild)
    fm.end()
785 806
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark rebuilding the dirstate file fold map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def rebuild():
        dirstate._map.filefoldmap.get(b'a')
        del dirstate._map.filefoldmap
    timer(rebuild)
    fm.end()
797 818
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark rebuilding the dirstate directory fold map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def rebuild():
        dirstate._map.dirfoldmap.get(b'a')
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(rebuild)
    fm.end()
810 831
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark writing the dirstate back out to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds
    def flush():
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(flush)
    fm.end()
822 843
@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    """benchmark merge action calculation against REV"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so
    # prime that cache
    wctx.dirty()
    def calculate():
        # acceptremote=True avoids prompts in the middle of the benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(calculate)
    fm.end()
841 862
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    timer(lambda: copies.pathcopies(ctx1, ctx2))
    fm.end()
853 874
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
          ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def compute():
        phases = _phases
        if full:
            # also pay the phaseroots file-read cost each run
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(compute)
    fm.end()
872 893
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = (path.branch, opts.get(b'branch') or [])
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    # fetch the remote phase roots once, outside the timed section
    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        # only the local summary computation is benchmarked
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
930 951
@command(b'perfmanifest',[
    (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
    (b'', b'clear-disk', False, b'clear on-disk caches too'),
] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset; use its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hex manifest node
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # modern hg exposes per-tree storage; older ones a bare revlog
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
966 987
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark parsing one changeset entry out of the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def readone():
        repo.changelog.read(node)

    timer(readone)
    fm.end()
977 998
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def resetignore():
        # drop the dirstate caches, including the cached _ignore matcher,
        # so each run pays the full load cost
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def loadignore():
        dirstate._ignore

    timer(loadignore, setup=resetignore, title=b"load")
    fm.end()
994 1015
@command(b'perfindex', [
    (b'', b'rev', '', b'revision to be looked up (default tip)'),
] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark resolving a node id to a revision number

    The changelog object is re-created for every sample so the index is
    re-parsed each time."""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    # The --rev default is the empty string, so an "is None" test could
    # never select the documented tip fallback and revsingle() would be
    # handed an empty spec; treat any false value as "use tip" instead.
    if not opts['rev']:
        n = repo[b"tip"].node()
    else:
        rev = scmutil.revsingle(repo, opts['rev'])
        n = repo[rev].node()

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        cl = makecl(unfi)
        cl.rev(n)
    timer(d, setup=setup)
    fm.end()
1021 1042
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup cost of a bare `hg version` subprocess"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        if os.name != r'nt':
            # clear HGRCPATH so config loading does not skew the timing,
            # and discard the output
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
        else:
            # windows shell has no inline env-var syntax; set it via environ
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
    timer(d)
    fm.end()
1035 1056
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark looking up the parents of many changelog nodes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodes = [repo.changelog.node(i) for i in _xrange(count)]

    def lookupall():
        for node in nodes:
            repo.changelog.parents(node)

    timer(lookupall)
    fm.end()
1052 1073
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of one changectx"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)

    def run():
        len(repo[rev].files())

    timer(run)
    fm.end()
1062 1083
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the file list straight from changelog data"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    changelog = repo.changelog

    def run():
        # field [3] of a parsed changelog entry is the list of files
        len(changelog.read(rev)[3])

    timer(run)
    fm.end()
1073 1094
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def lookup():
        return len(repo.lookup(rev))

    timer(lookup)
    fm.end()
1080 1101
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
          ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark applying a long pseudo-random series of edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed so every run replays the exact same edit script
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        # replace lines [a1, a2) of the current text with new lines [b1, b2)
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1114 1135
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # bind the helper once so the timed call does only the resolution
    resolve = scmutil.revrange

    def run():
        return len(resolve(repo, specs))

    timer(run)
    fm.end()
1122 1143
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark resolving a node to a rev on a freshly opened changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    # open the changelog directly so no repo-level caching interferes
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(n)
        # drop lookup caches so every sample does the full work
        clearcaches(cl)
    timer(d)
    fm.end()
1136 1157
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
          ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark a full `hg log` run (output is buffered and discarded)"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev=[]
    timer, fm = gettimer(ui, opts)
    # swallow log output so terminal I/O does not pollute the measurement
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
                               copies=opts.get(b'rename')))
    ui.popbuffer()
    fm.end()
1150 1171
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def walkback():
        lastrev = len(repo) - 1
        for rev in repo.changelog.revs(start=lastrev, stop=-1):
            # reading the branch forces the changelog entry itself to be
            # parsed, not just the index
            repo[rev].branch()

    timer(walkback)
    fm.end()
1165 1186
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
          ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render to /dev/null so output I/O does not pollute the measurement
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1199 1220
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
          ])
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    # the --timing variant has two extra columns (rename count and time)
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # only merges are interesting: copy tracing runs between each merge
    # parent and their common ancestor heads
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
1275 1296
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor over the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(run)
    fm.end()
1282 1303
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    def load():
        store.fncache._load()

    timer(load)
    fm.end()
1292 1313
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache file inside a transaction"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    # writing the fncache requires the store lock and a live transaction
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')
    def d():
        # re-mark dirty each run, otherwise write() would short-circuit
        s.fncache._dirty = True
        s.fncache.write(tr)
    timer(d)
    tr.close()
    lock.release()
    fm.end()
1309 1330
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    # load once up front; only the encoding itself is timed
    store.fncache._load()

    def encodeall():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encodeall)
    fm.end()
1321 1342
def _bdiffworker(q, blocks, xdiff, ready, done):
    """worker loop for threaded perfbdiff runs

    Pulls text pairs from queue *q* and diffs them until a ``None``
    sentinel is seen, then parks on the *ready* condition until the main
    thread wakes it for the next batch; exits once *done* is set.
    """
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            # same three diff flavors as the single-threaded path
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()
1337 1358
def _manifestrevision(repo, mnode):
    """Return the raw stored text of manifest node *mnode*."""
    manifestlog = repo.manifestlog

    # modern hg exposes per-tree storage; older versions a bare revlog
    if util.safehasattr(manifestlog, b'getstorage'):
        storage = manifestlog.getstorage(b'')
    else:
        storage = manifestlog._revlog

    return storage.revision(mnode)
1347 1368
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # no FILE argument in these modes: first positional is the rev
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # collect the text pairs to diff before timing anything
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        # threaded mode: workers consume pairs from a queue; each batch is
        # terminated by one None sentinel per worker (see _bdiffworker)
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # wake the parked workers one last time so they observe `done`
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # no FILE argument in these modes: first positional is the rev
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # collect the text pairs to diff before timing anything
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1514 1535
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # map each single-letter flag to the diff option it enables
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    # benchmark each whitespace-option combination separately
    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = dict((options[c], b'1') for c in diffopt)
        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()
        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()
1536 1557
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # only revlog format v1 is supported here; the header encodes
    # version (low 16 bits) and flags such as "inline"
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    # sample nodes spread across the revlog for the lookup benchmarks
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1654 1675
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # negative startrev counts from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in _xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1696 1717
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          # fix: this help text was a copy/paste of --stoprev's
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
         ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative values count from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase)
        allresults.append(timing)

    ### consolidate the results in a single list
    # each entry becomes (rev, [timing-run-1, timing-run-2, ...])
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # fix: the median row previously used 70 instead of 50
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
1798 1819
1799 1820 class _faketr(object):
1800 1821 def add(s, x, y, z=None):
1801 1822 return None
1802 1823
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True):
    """Re-add revisions [startrev, stoprev] of revlog `orig` into a
    temporary revlog copy, timing each addrawrevision() call.

    `source` selects how each revision's payload is seeded (fulltext or a
    cached delta) -- see _getrevisionseed.  `runidx`, when not None, is
    only used to label the progress topic for multi-run invocations.

    Returns a list of (rev, timing) pairs, `timing` being the measurement
    tuple produced by the `timeone` context manager.
    """
    timings = []
    tr = _faketr()  # revlog wants a transaction; a no-op stub suffices
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        for idx, rev in enumerate(revs):
            ui.progress(topic, idx, unit='revs', total=total)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            # only the addrawrevision() call itself is timed; seed
            # preparation above stays outside the timer
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        ui.progress(topic, total, unit='revs', total=total)
        ui.progress(topic, None, unit='revs', total=total)
    return timings
1823 1844
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) pair to feed dest.addrawrevision() for `rev`.

    `source` selects how the revision payload is provided:

    - b'full':            resolved fulltext, no cached delta
    - b'parent-1':        delta against the first parent
    - b'parent-2':        delta against the second parent (falls back to
                          p1 when there is no second parent)
    - b'parent-smallest': delta against whichever parent yields the
                          smaller delta
    - b'storage':         the delta base actually recorded in the source
                          revlog

    Exactly one of `text` / `cachedelta` ends up non-None (an unknown
    `source` was already rejected by the caller).
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        parent = p2
        if p2 == nullid:
            # no second parent: degrade to a delta against p1
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        # reuse the delta parent recorded in the source revlog
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1862 1883
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a writable copy of revlog `orig`, truncated so that revisions
    >= `truncaterev` can be re-added.

    The copy lives in a private temporary directory which is removed on
    exit.  Inline revlogs are rejected: index and data share one file
    there, so the independent two-file truncation below would not apply.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        # mode 'ab' keeps existing bytes; seek(0) + truncate(n) then cuts
        # the file at the byte offset of the first revision to re-add
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
1909 1930
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                # NOTE(review): the rest of this block uses
                # util.compengines; verify util.compressionengines exists
                # (an AttributeError here would escape the KeyError guard)
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # default to every engine that is available and actually able to
        # compress (probed with a dummy payload)
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # open a raw file handle on the file holding the chunk data
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # same as doread but reusing one file descriptor for all reads
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # one segment read covering the whole revision span
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        # relies on dochunkbatch having populated chunks[0] earlier in
        # the bench list
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2027 2048
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the positional argument is the revision, not a file
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # slice each pre-read segment back into per-revision raw chunks
        # (zero-copy via util.buffer)
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        # NOTE(review): the `chain` argument is ignored; this reads the
        # precomputed `slicedchain` from the enclosing scope
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved modules over time; fall back for older Mercurial
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute every intermediate product once so each phase can be
    # timed in isolation
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2163 2184
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if need to evaluate the impact of build volatile
    revisions set cache on the revset execution. Volatile cache hold filtered
    and obsolete related cache."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            # materialize a changectx per revision to include its overhead
            for ctx in repo.set(expr): pass
        else:
            for r in repo.revs(expr): pass
    timer(d)
    fm.end()
2186 2207
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def getobs(name):
        # closure timing computation of obsolescence set `name` from a
        # cold volatile cache
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)
        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        # restrict to the sets explicitly requested on the command line
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        # closure timing computation of repoview filter `name` from a
        # cold volatile cache
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)
        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()
2228 2249
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
          ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap: measures a build from scratch
                view._branchcaches.clear()
            else:
                # drop only this filter's cache: measures an incremental
                # update from the nearest cached subset
                view._branchcaches.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # topological pass: only emit a filter once its subset is emitted
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap I/O so only the computation is measured
    branchcacheread = safeattrsetter(branchmap, b'read')
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcacheread.set(lambda repo: None)
    branchcachewrite.set(lambda bc, repo: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2297 2318
@command(b'perfbranchmapupdate', [
     (b'', b'base', [], b'subset of revision to start from'),
     (b'', b'target', [], b'subset of revision to end with'),
     (b'', b'clear-caches', False, b'clear cache between each runs')
    ] + formatteropts)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # register two throw-away repoview filters materializing the
        # <base> and <target> states; removed again in the finally below
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset where found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

    def setup():
            # work on a fresh copy each run; update() mutates in place
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2401 2422
@command(b'perfbranchmapload', [
     (b'f', b'filter', b'', b'Specify repoview filter'),
     (b'', b'list', False, b'List branchmap filter caches'),
     (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),

    ] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    # `filter` and `list` shadow builtins but are part of the command's
    # CLI interface (option names), so they are kept as-is
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # --list: just enumerate the on-disk branchmap caches, no timing
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    currentfilter = filter
    # try once without timer, the filter may not be cached; walk up the
    # subset chain until a filter with an on-disk cache is found
    while branchmap.read(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        branchmap.read(repo)
    timer(bench, setup=setup)
    fm.end()
2446 2467
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)
    # constructing obsstore parses the on-disk markers; len() forces it
    timer(lambda: len(obsolete.obsstore(svfs)))
    fm.end()
2456 2477
@command(b'perflrucachedict', formatteropts +
         [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
          (b'', b'mincost', 0, b'smallest cost of items in cache'),
          (b'', b'maxcost', 100, b'maximum cost of items in cache'),
          (b'', b'size', 4, b'size of cache'),
          (b'', b'gets', 10000, b'number of key lookups'),
          (b'', b'sets', 10000, b'number of key sets'),
          (b'', b'mixed', 10000, b'number of mixed mode operations'),
          (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
         norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    # benchmarks util.lrucachedict: init, raw gets, inserts/sets with
    # eviction, and a randomized get/set mix; when --costlimit is set,
    # the cost-aware variants are measured instead
    # (no docstring on purpose: help output shows "(no help text
    # available)" and may be pinned by the test suite)
    opts = _byteskwargs(opts)

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        # `costs` is assigned below; safe because this closure only runs
        # once the benchmark loop at the bottom calls it
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        # key range is twice the cache size so roughly half the gets miss
        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2587 2608
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def bench():
        # 100000 identical writes; the message constant is hoisted so the
        # loop body is a bare ui.write call
        msg = b'Testing write performance\n'
        for _i in range(100000):
            ui.write(msg)

    timer(bench)
    fm.end()
2600 2621
def uisetup(ui):
    """Extension setup hook.

    On Mercurial 1.9 - 3.7 (openrevlog exists but debugrevlogopts does
    not), wrap cmdutil.openrevlog so that the --dir option fails with a
    clear message instead of being silently ignored.
    """
    if (util.safehasattr(cmdutil, b'openrevlog') and
        not util.safehasattr(commands, b'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(b"This version doesn't support --dir option",
                                  hint=b"use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2615 2636
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        # one full sweep of the progress bar, one increment per step
        with ui.makeprogress(topic, total=total) as progress:
            for i in pycompat.xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
@@ -1,296 +1,300 b''
1 1 #require test-repo
2 2
3 3 Set vars:
4 4
5 5 $ . "$TESTDIR/helpers-testrepo.sh"
6 6 $ CONTRIBDIR="$TESTDIR/../contrib"
7 7
8 8 Prepare repo:
9 9
10 10 $ hg init
11 11
12 12 $ echo this is file a > a
13 13 $ hg add a
14 14 $ hg commit -m first
15 15
16 16 $ echo adding to file a >> a
17 17 $ hg commit -m second
18 18
19 19 $ echo adding more to file a >> a
20 20 $ hg commit -m third
21 21
22 22 $ hg up -r 0
23 23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 24 $ echo merge-this >> a
25 25 $ hg commit -m merge-able
26 26 created new head
27 27
28 28 $ hg up -r 2
29 29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30 30
31 31 perfstatus
32 32
33 33 $ cat >> $HGRCPATH << EOF
34 34 > [extensions]
35 35 > perfstatusext=$CONTRIBDIR/perf.py
36 36 > [perf]
37 37 > presleep=0
38 38 > stub=on
39 39 > parentscount=1
40 40 > EOF
41 41 $ hg help perfstatusext
42 42 perfstatusext extension - helper extension to measure performance
43 43
44 44 list of commands:
45 45
46 46 perfaddremove
47 47 (no help text available)
48 48 perfancestors
49 49 (no help text available)
50 50 perfancestorset
51 51 (no help text available)
52 52 perfannotate (no help text available)
53 53 perfbdiff benchmark a bdiff between revisions
54 54 perfbookmarks
55 55 benchmark parsing bookmarks from disk to memory
56 56 perfbranchmap
57 57 benchmark the update of a branchmap
58 58 perfbranchmapload
59 59 benchmark reading the branchmap
60 60 perfbranchmapupdate
61 61 benchmark branchmap update from for <base> revs to <target>
62 62 revs
63 63 perfbundleread
64 64 Benchmark reading of bundle files.
65 65 perfcca (no help text available)
66 66 perfchangegroupchangelog
67 67 Benchmark producing a changelog group for a changegroup.
68 68 perfchangeset
69 69 (no help text available)
70 70 perfctxfiles (no help text available)
71 71 perfdiffwd Profile diff of working directory changes
72 72 perfdirfoldmap
73 73 (no help text available)
74 74 perfdirs (no help text available)
75 75 perfdirstate (no help text available)
76 76 perfdirstatedirs
77 77 (no help text available)
78 78 perfdirstatefoldmap
79 79 (no help text available)
80 80 perfdirstatewrite
81 81 (no help text available)
82 perfdiscovery
83 benchmark discovery between local repo and the peer at given
84 path
82 85 perffncacheencode
83 86 (no help text available)
84 87 perffncacheload
85 88 (no help text available)
86 89 perffncachewrite
87 90 (no help text available)
88 91 perfheads (no help text available)
89 92 perfhelper-pathcopies
90 93 find statistic about potential parameters for the
91 94 'perftracecopies'
92 95 perfignore benchmark operation related to computing ignore
93 96 perfindex (no help text available)
94 97 perflinelogedits
95 98 (no help text available)
96 99 perfloadmarkers
97 100 benchmark the time to parse the on-disk markers for a repo
98 101 perflog (no help text available)
99 102 perflookup (no help text available)
100 103 perflrucachedict
101 104 (no help text available)
102 105 perfmanifest benchmark the time to read a manifest from disk and return a
103 106 usable
104 107 perfmergecalculate
105 108 (no help text available)
106 109 perfmoonwalk benchmark walking the changelog backwards
107 110 perfnodelookup
108 111 (no help text available)
109 112 perfparents (no help text available)
110 113 perfpathcopies
111 114 benchmark the copy tracing logic
112 115 perfphases benchmark phasesets computation
113 116 perfphasesremote
114 117 benchmark time needed to analyse phases of the remote server
115 118 perfprogress printing of progress bars
116 119 perfrawfiles (no help text available)
117 120 perfrevlogchunks
118 121 Benchmark operations on revlog chunks.
119 122 perfrevlogindex
120 123 Benchmark operations against a revlog index.
121 124 perfrevlogrevision
122 125 Benchmark obtaining a revlog revision.
123 126 perfrevlogrevisions
124 127 Benchmark reading a series of revisions from a revlog.
125 128 perfrevlogwrite
126 129 Benchmark writing a series of revisions to a revlog.
127 130 perfrevrange (no help text available)
128 131 perfrevset benchmark the execution time of a revset
129 132 perfstartup (no help text available)
130 133 perfstatus (no help text available)
131 134 perftags (no help text available)
132 135 perftemplating
133 136 test the rendering time of a given template
134 137 perfunidiff benchmark a unified diff between revisions
135 138 perfvolatilesets
136 139 benchmark the computation of various volatile set
137 140 perfwalk (no help text available)
138 141 perfwrite microbenchmark ui.write
139 142
140 143 (use 'hg help -v perfstatusext' to show built-in aliases and global options)
141 144 $ hg perfaddremove
142 145 $ hg perfancestors
143 146 $ hg perfancestorset 2
144 147 $ hg perfannotate a
145 148 $ hg perfbdiff -c 1
146 149 $ hg perfbdiff --alldata 1
147 150 $ hg perfunidiff -c 1
148 151 $ hg perfunidiff --alldata 1
149 152 $ hg perfbookmarks
150 153 $ hg perfbranchmap
151 154 $ hg perfbranchmapload
152 155 $ hg perfbranchmapupdate --base "not tip" --target "tip"
153 156 benchmark of branchmap with 3 revisions with 1 new ones
154 157 $ hg perfcca
155 158 $ hg perfchangegroupchangelog
156 159 $ hg perfchangegroupchangelog --cgversion 01
157 160 $ hg perfchangeset 2
158 161 $ hg perfctxfiles 2
159 162 $ hg perfdiffwd
160 163 $ hg perfdirfoldmap
161 164 $ hg perfdirs
162 165 $ hg perfdirstate
163 166 $ hg perfdirstatedirs
164 167 $ hg perfdirstatefoldmap
165 168 $ hg perfdirstatewrite
166 169 #if repofncache
167 170 $ hg perffncacheencode
168 171 $ hg perffncacheload
169 172 $ hg debugrebuildfncache
170 173 fncache already up to date
171 174 $ hg perffncachewrite
172 175 $ hg debugrebuildfncache
173 176 fncache already up to date
174 177 #endif
175 178 $ hg perfheads
176 179 $ hg perfignore
177 180 $ hg perfindex
178 181 $ hg perflinelogedits -n 1
179 182 $ hg perfloadmarkers
180 183 $ hg perflog
181 184 $ hg perflookup 2
182 185 $ hg perflrucache
183 186 $ hg perfmanifest 2
184 187 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
185 188 $ hg perfmanifest -m 44fe2c8352bb
186 189 abort: manifest revision must be integer or full node
187 190 [255]
188 191 $ hg perfmergecalculate -r 3
189 192 $ hg perfmoonwalk
190 193 $ hg perfnodelookup 2
191 194 $ hg perfpathcopies 1 2
192 195 $ hg perfprogress --total 1000
193 196 $ hg perfrawfiles 2
194 197 $ hg perfrevlogindex -c
195 198 #if reporevlogstore
196 199 $ hg perfrevlogrevisions .hg/store/data/a.i
197 200 #endif
198 201 $ hg perfrevlogrevision -m 0
199 202 $ hg perfrevlogchunks -c
200 203 $ hg perfrevrange
201 204 $ hg perfrevset 'all()'
202 205 $ hg perfstartup
203 206 $ hg perfstatus
204 207 $ hg perftags
205 208 $ hg perftemplating
206 209 $ hg perfvolatilesets
207 210 $ hg perfwalk
208 211 $ hg perfparents
212 $ hg perfdiscovery -q .
209 213
210 214 test actual output
211 215 ------------------
212 216
213 217 normal output:
214 218
215 219 $ hg perfheads --config perf.stub=no
216 220 ! wall * comb * user * sys * (best of *) (glob)
217 221
218 222 detailed output:
219 223
220 224 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
221 225 ! wall * comb * user * sys * (best of *) (glob)
222 226 ! wall * comb * user * sys * (max of *) (glob)
223 227 ! wall * comb * user * sys * (avg of *) (glob)
224 228 ! wall * comb * user * sys * (median of *) (glob)
225 229
226 230 test json output
227 231 ----------------
228 232
229 233 normal output:
230 234
231 235 $ hg perfheads --template json --config perf.stub=no
232 236 [
233 237 {
234 238 "comb": *, (glob)
235 239 "count": *, (glob)
236 240 "sys": *, (glob)
237 241 "user": *, (glob)
238 242 "wall": * (glob)
239 243 }
240 244 ]
241 245
242 246 detailed output:
243 247
244 248 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
245 249 [
246 250 {
247 251 "avg.comb": *, (glob)
248 252 "avg.count": *, (glob)
249 253 "avg.sys": *, (glob)
250 254 "avg.user": *, (glob)
251 255 "avg.wall": *, (glob)
252 256 "comb": *, (glob)
253 257 "count": *, (glob)
254 258 "max.comb": *, (glob)
255 259 "max.count": *, (glob)
256 260 "max.sys": *, (glob)
257 261 "max.user": *, (glob)
258 262 "max.wall": *, (glob)
259 263 "median.comb": *, (glob)
260 264 "median.count": *, (glob)
261 265 "median.sys": *, (glob)
262 266 "median.user": *, (glob)
263 267 "median.wall": *, (glob)
264 268 "sys": *, (glob)
265 269 "user": *, (glob)
266 270 "wall": * (glob)
267 271 }
268 272 ]
269 273
270 274 Check perf.py for historical portability
271 275 ----------------------------------------
272 276
273 277 $ cd "$TESTDIR/.."
274 278
275 279 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
276 280 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
277 281 > "$TESTDIR"/check-perf-code.py contrib/perf.py
278 282 contrib/perf.py:\d+: (re)
279 283 > from mercurial import (
280 284 import newer module separately in try clause for early Mercurial
281 285 contrib/perf.py:\d+: (re)
282 286 > from mercurial import (
283 287 import newer module separately in try clause for early Mercurial
284 288 contrib/perf.py:\d+: (re)
285 289 > origindexpath = orig.opener.join(orig.indexfile)
286 290 use getvfs()/getsvfs() for early Mercurial
287 291 contrib/perf.py:\d+: (re)
288 292 > origdatapath = orig.opener.join(orig.datafile)
289 293 use getvfs()/getsvfs() for early Mercurial
290 294 contrib/perf.py:\d+: (re)
291 295 > vfs = vfsmod.vfs(tmpdir)
292 296 use getvfs()/getsvfs() for early Mercurial
293 297 contrib/perf.py:\d+: (re)
294 298 > vfs.options = getattr(orig.opener, 'options', None)
295 299 use getvfs()/getsvfs() for early Mercurial
296 300 [1]
General Comments 0
You need to be logged in to leave comments. Login now