perf: add `storage` as possible source for perfrevlogwrite...
Boris Feld
r40590:355ae096 default
@@ -1,2355 +1,2360 @@
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance'''
3 3
4 4 # "historical portability" policy of perf.py:
5 5 #
6 6 # We have to do:
7 7 # - make perf.py "loadable" with as wide Mercurial version as possible
8 8 # This doesn't mean that perf commands work correctly with that Mercurial.
9 9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
10 10 # - make historical perf command work correctly with as wide Mercurial
11 11 # version as possible
12 12 #
13 13 # We have to do, if possible with reasonable cost:
14 14 # - make recent perf command for historical feature work correctly
15 15 # with early Mercurial
16 16 #
17 17 # We don't have to do:
18 18 # - make perf command for recent feature work correctly with early
19 19 # Mercurial
20 20
21 21 from __future__ import absolute_import
22 22 import contextlib
23 23 import functools
24 24 import gc
25 25 import os
26 26 import random
27 27 import shutil
28 28 import struct
29 29 import sys
30 30 import tempfile
31 31 import threading
32 32 import time
33 33 from mercurial import (
34 34 changegroup,
35 35 cmdutil,
36 36 commands,
37 37 copies,
38 38 error,
39 39 extensions,
40 40 mdiff,
41 41 merge,
42 42 revlog,
43 43 util,
44 44 )
45 45
46 46 # for "historical portability":
47 47 # try to import modules separately (in dict order), and ignore
48 48 # failure, because these aren't available with early Mercurial
49 49 try:
50 50 from mercurial import branchmap # since 2.5 (or bcee63733aad)
51 51 except ImportError:
52 52 pass
53 53 try:
54 54 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
55 55 except ImportError:
56 56 pass
57 57 try:
58 58 from mercurial import registrar # since 3.7 (or 37d50250b696)
59 59 dir(registrar) # forcibly load it
60 60 except ImportError:
61 61 registrar = None
62 62 try:
63 63 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
64 64 except ImportError:
65 65 pass
66 66 try:
67 67 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
68 68 except ImportError:
69 69 pass
70 70
71 71 def identity(a):
72 72 return a
73 73
74 74 try:
75 75 from mercurial import pycompat
76 76 getargspec = pycompat.getargspec # added to module after 4.5
77 77 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
78 78 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
79 79 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
80 80 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
81 81 if pycompat.ispy3:
82 82 _maxint = sys.maxsize # per py3 docs for replacing maxint
83 83 else:
84 84 _maxint = sys.maxint
85 85 except (ImportError, AttributeError):
86 86 import inspect
87 87 getargspec = inspect.getargspec
88 88 _byteskwargs = identity
89 89 fsencode = identity # no py3 support
90 90 _maxint = sys.maxint # no py3 support
91 91 _sysstr = lambda x: x # no py3 support
92 92 _xrange = xrange
93 93
94 94 try:
95 95 # 4.7+
96 96 queue = pycompat.queue.Queue
97 97 except (AttributeError, ImportError):
98 98 # <4.7.
99 99 try:
100 100 queue = pycompat.queue
101 101 except (AttributeError, ImportError):
102 102 queue = util.queue
103 103
104 104 try:
105 105 from mercurial import logcmdutil
106 106 makelogtemplater = logcmdutil.maketemplater
107 107 except (AttributeError, ImportError):
108 108 try:
109 109 makelogtemplater = cmdutil.makelogtemplater
110 110 except (AttributeError, ImportError):
111 111 makelogtemplater = None
112 112
113 113 # for "historical portability":
114 114 # define util.safehasattr forcibly, because util.safehasattr has been
115 115 # available since 1.9.3 (or 94b200a11cf7)
116 116 _undefined = object()
117 117 def safehasattr(thing, attr):
118 118 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
119 119 setattr(util, 'safehasattr', safehasattr)
120 120
121 121 # for "historical portability":
122 122 # define util.timer forcibly, because util.timer has been available
123 123 # since ae5d60bb70c9
124 124 if safehasattr(time, 'perf_counter'):
125 125 util.timer = time.perf_counter
126 126 elif os.name == b'nt':
127 127 util.timer = time.clock
128 128 else:
129 129 util.timer = time.time
130 130
131 131 # for "historical portability":
132 132 # use locally defined empty option list, if formatteropts isn't
133 133 # available, because commands.formatteropts has been available since
134 134 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
135 135 # available since 2.2 (or ae5f92e154d3)
136 136 formatteropts = getattr(cmdutil, "formatteropts",
137 137 getattr(commands, "formatteropts", []))
138 138
139 139 # for "historical portability":
140 140 # use locally defined option list, if debugrevlogopts isn't available,
141 141 # because commands.debugrevlogopts has been available since 3.7 (or
142 142 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
143 143 # since 1.9 (or a79fea6b3e77).
144 144 revlogopts = getattr(cmdutil, "debugrevlogopts",
145 145 getattr(commands, "debugrevlogopts", [
146 146 (b'c', b'changelog', False, (b'open changelog')),
147 147 (b'm', b'manifest', False, (b'open manifest')),
148 148 (b'', b'dir', False, (b'open directory manifest')),
149 149 ]))
150 150
151 151 cmdtable = {}
152 152
153 153 # for "historical portability":
154 154 # define parsealiases locally, because cmdutil.parsealiases has been
155 155 # available since 1.5 (or 6252852b4332)
156 156 def parsealiases(cmd):
157 157 return cmd.split(b"|")
158 158
159 159 if safehasattr(registrar, 'command'):
160 160 command = registrar.command(cmdtable)
161 161 elif safehasattr(cmdutil, 'command'):
162 162 command = cmdutil.command(cmdtable)
163 163 if b'norepo' not in getargspec(command).args:
164 164 # for "historical portability":
165 165 # wrap original cmdutil.command, because "norepo" option has
166 166 # been available since 3.1 (or 75a96326cecb)
167 167 _command = command
168 168 def command(name, options=(), synopsis=None, norepo=False):
169 169 if norepo:
170 170 commands.norepo += b' %s' % b' '.join(parsealiases(name))
171 171 return _command(name, list(options), synopsis)
172 172 else:
173 173 # for "historical portability":
174 174 # define "@command" annotation locally, because cmdutil.command
175 175 # has been available since 1.9 (or 2daa5179e73f)
176 176 def command(name, options=(), synopsis=None, norepo=False):
177 177 def decorator(func):
178 178 if synopsis:
179 179 cmdtable[name] = func, list(options), synopsis
180 180 else:
181 181 cmdtable[name] = func, list(options)
182 182 if norepo:
183 183 commands.norepo += b' %s' % b' '.join(parsealiases(name))
184 184 return func
185 185 return decorator
186 186
187 187 try:
188 188 import mercurial.registrar
189 189 import mercurial.configitems
190 190 configtable = {}
191 191 configitem = mercurial.registrar.configitem(configtable)
192 192 configitem(b'perf', b'presleep',
193 193 default=mercurial.configitems.dynamicdefault,
194 194 )
195 195 configitem(b'perf', b'stub',
196 196 default=mercurial.configitems.dynamicdefault,
197 197 )
198 198 configitem(b'perf', b'parentscount',
199 199 default=mercurial.configitems.dynamicdefault,
200 200 )
201 201 configitem(b'perf', b'all-timing',
202 202 default=mercurial.configitems.dynamicdefault,
203 203 )
204 204 except (ImportError, AttributeError):
205 205 pass
206 206
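# The perf.* options registered above are read back below through
# ui.configbool() and getint(). A minimal hgrc sketch to exercise them might
# look like this (values are illustrative):
#
#   [perf]
#   # skip the idle period enforced before each measurement
#   presleep = 0
#   # report max/avg/median in addition to the best run
#   all-timing = yes
#   # run each benchmark body only once (quick smoke-test mode)
#   stub = no
#   # number of commits perfparents iterates over
#   parentscount = 1000
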
207 207 def getlen(ui):
208 208 if ui.configbool(b"perf", b"stub", False):
209 209 return lambda x: 1
210 210 return len
211 211
212 212 def gettimer(ui, opts=None):
213 213 """return a timer function and formatter: (timer, formatter)
214 214
215 215 This function exists to gather the creation of formatter in a single
216 216 place instead of duplicating it in all performance commands."""
217 217
218 218 # enforce an idle period before execution to counteract power management
219 219 # experimental config: perf.presleep
220 220 time.sleep(getint(ui, b"perf", b"presleep", 1))
221 221
222 222 if opts is None:
223 223 opts = {}
224 224 # redirect all to stderr unless buffer api is in use
225 225 if not ui._buffers:
226 226 ui = ui.copy()
227 227 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
228 228 if uifout:
229 229 # for "historical portability":
230 230 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
231 231 uifout.set(ui.ferr)
232 232
233 233 # get a formatter
234 234 uiformatter = getattr(ui, 'formatter', None)
235 235 if uiformatter:
236 236 fm = uiformatter(b'perf', opts)
237 237 else:
238 238 # for "historical portability":
239 239 # define formatter locally, because ui.formatter has been
240 240 # available since 2.2 (or ae5f92e154d3)
241 241 from mercurial import node
242 242 class defaultformatter(object):
243 243 """Minimized composition of baseformatter and plainformatter
244 244 """
245 245 def __init__(self, ui, topic, opts):
246 246 self._ui = ui
247 247 if ui.debugflag:
248 248 self.hexfunc = node.hex
249 249 else:
250 250 self.hexfunc = node.short
251 251 def __nonzero__(self):
252 252 return False
253 253 __bool__ = __nonzero__
254 254 def startitem(self):
255 255 pass
256 256 def data(self, **data):
257 257 pass
258 258 def write(self, fields, deftext, *fielddata, **opts):
259 259 self._ui.write(deftext % fielddata, **opts)
260 260 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
261 261 if cond:
262 262 self._ui.write(deftext % fielddata, **opts)
263 263 def plain(self, text, **opts):
264 264 self._ui.write(text, **opts)
265 265 def end(self):
266 266 pass
267 267 fm = defaultformatter(ui, b'perf', opts)
268 268
269 269 # stub function, runs code only once instead of in a loop
270 270 # experimental config: perf.stub
271 271 if ui.configbool(b"perf", b"stub", False):
272 272 return functools.partial(stub_timer, fm), fm
273 273
274 274 # experimental config: perf.all-timing
275 275 displayall = ui.configbool(b"perf", b"all-timing", False)
276 276 return functools.partial(_timer, fm, displayall=displayall), fm
277 277
278 278 def stub_timer(fm, func, title=None):
279 279 func()
280 280
281 281 @contextlib.contextmanager
282 282 def timeone():
283 283 r = []
284 284 ostart = os.times()
285 285 cstart = util.timer()
286 286 yield r
287 287 cstop = util.timer()
288 288 ostop = os.times()
289 289 a, b = ostart, ostop
290 290 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
291 291
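# A minimal usage sketch of the timeone() context manager above
# (illustrative; do_expensive_call is a hypothetical workload):
#
#     with timeone() as r:
#         do_expensive_call()
#     wall, user, system = r[0]   # wall clock, user CPU and system CPU seconds
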
292 292 def _timer(fm, func, title=None, displayall=False):
293 293 gc.collect()
294 294 results = []
295 295 begin = util.timer()
296 296 count = 0
297 297 while True:
298 298 with timeone() as item:
299 299 r = func()
300 300 count += 1
301 301 results.append(item[0])
302 302 cstop = util.timer()
303 303 if cstop - begin > 3 and count >= 100:
304 304 break
305 305 if cstop - begin > 10 and count >= 3:
306 306 break
307 307
308 308 formatone(fm, results, title=title, result=r,
309 309 displayall=displayall)
310 310
311 311 def formatone(fm, timings, title=None, result=None, displayall=False):
312 312
313 313 count = len(timings)
314 314
315 315 fm.startitem()
316 316
317 317 if title:
318 318 fm.write(b'title', b'! %s\n', title)
319 319 if result:
320 320 fm.write(b'result', b'! result: %s\n', result)
321 321 def display(role, entry):
322 322 prefix = b''
323 323 if role != b'best':
324 324 prefix = b'%s.' % role
325 325 fm.plain(b'!')
326 326 fm.write(prefix + b'wall', b' wall %f', entry[0])
327 327 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
328 328 fm.write(prefix + b'user', b' user %f', entry[1])
329 329 fm.write(prefix + b'sys', b' sys %f', entry[2])
330 330 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
331 331 fm.plain(b'\n')
332 332 timings.sort()
333 333 min_val = timings[0]
334 334 display(b'best', min_val)
335 335 if displayall:
336 336 max_val = timings[-1]
337 337 display(b'max', max_val)
338 338 avg = tuple([sum(x) / count for x in zip(*timings)])
339 339 display(b'avg', avg)
340 340 median = timings[len(timings) // 2]
341 341 display(b'median', median)
342 342
343 343 # utilities for historical portability
344 344
345 345 def getint(ui, section, name, default):
346 346 # for "historical portability":
347 347 # ui.configint has been available since 1.9 (or fa2b596db182)
348 348 v = ui.config(section, name, None)
349 349 if v is None:
350 350 return default
351 351 try:
352 352 return int(v)
353 353 except ValueError:
354 354 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
355 355 % (section, name, v))
356 356
357 357 def safeattrsetter(obj, name, ignoremissing=False):
358 358 """Ensure that 'obj' has 'name' attribute before subsequent setattr
359 359
360 360 This function aborts if 'obj' doesn't have the 'name' attribute
361 361 at runtime. This avoids silently overlooking a future removal of the
362 362 attribute, which would invalidate the performance measurement.
363 363
364 364 This function returns an object that can be used to (1) assign a new
365 365 value to the attribute and (2) restore the attribute's original value.
366 366
367 367 If 'ignoremissing' is true, a missing 'name' attribute doesn't cause
368 368 an abort, and this function returns None. This is useful for
369 369 examining an attribute that isn't guaranteed to exist in all
370 370 Mercurial versions.
371 371 """
372 372 if not util.safehasattr(obj, name):
373 373 if ignoremissing:
374 374 return None
375 375 raise error.Abort((b"missing attribute %s of %s might break assumption"
376 376 b" of performance measurement") % (name, obj))
377 377
378 378 origvalue = getattr(obj, _sysstr(name))
379 379 class attrutil(object):
380 380 def set(self, newvalue):
381 381 setattr(obj, _sysstr(name), newvalue)
382 382 def restore(self):
383 383 setattr(obj, _sysstr(name), origvalue)
384 384
385 385 return attrutil()
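
# A usage sketch for safeattrsetter() (illustrative, similar to how gettimer()
# above redirects ui.fout):
#
#     fout = safeattrsetter(ui, b'fout', ignoremissing=True)
#     if fout:
#         fout.set(ui.ferr)   # temporarily point ui.fout at stderr
#         ...                 # run the measurement
#         fout.restore()      # put the original ui.fout back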
386 386
387 387 # utilities to examine each internal API change
388 388
389 389 def getbranchmapsubsettable():
390 390 # for "historical portability":
391 391 # subsettable is defined in:
392 392 # - branchmap since 2.9 (or 175c6fd8cacc)
393 393 # - repoview since 2.5 (or 59a9f18d4587)
394 394 for mod in (branchmap, repoview):
395 395 subsettable = getattr(mod, 'subsettable', None)
396 396 if subsettable:
397 397 return subsettable
398 398
399 399 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
400 400 # branchmap and repoview modules exist, but subsettable attribute
401 401 # doesn't)
402 402 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
403 403 hint=b"use 2.5 or later")
404 404
405 405 def getsvfs(repo):
406 406 """Return appropriate object to access files under .hg/store
407 407 """
408 408 # for "historical portability":
409 409 # repo.svfs has been available since 2.3 (or 7034365089bf)
410 410 svfs = getattr(repo, 'svfs', None)
411 411 if svfs:
412 412 return svfs
413 413 else:
414 414 return getattr(repo, 'sopener')
415 415
416 416 def getvfs(repo):
417 417 """Return appropriate object to access files under .hg
418 418 """
419 419 # for "historical portability":
420 420 # repo.vfs has been available since 2.3 (or 7034365089bf)
421 421 vfs = getattr(repo, 'vfs', None)
422 422 if vfs:
423 423 return vfs
424 424 else:
425 425 return getattr(repo, 'opener')
426 426
427 427 def repocleartagscachefunc(repo):
428 428 """Return the function to clear tags cache according to repo internal API
429 429 """
430 430 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
431 431 # in this case, setattr(repo, '_tagscache', None) or so isn't the
432 432 # correct way to clear the tags cache, because existing code paths
433 433 # expect _tagscache to be a structured object.
434 434 def clearcache():
435 435 # _tagscache has been filteredpropertycache since 2.5 (or
436 436 # 98c867ac1330), and delattr() can't work in such case
437 437 if b'_tagscache' in vars(repo):
438 438 del repo.__dict__[b'_tagscache']
439 439 return clearcache
440 440
441 441 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
442 442 if repotags: # since 1.4 (or 5614a628d173)
443 443 return lambda : repotags.set(None)
444 444
445 445 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
446 446 if repotagscache: # since 0.6 (or d7df759d0e97)
447 447 return lambda : repotagscache.set(None)
448 448
449 449 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
450 450 # this point, but it isn't so problematic, because:
451 451 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
452 452 # in perftags() causes failure soon
453 453 # - perf.py itself has been available since 1.1 (or eb240755386d)
454 454 raise error.Abort((b"tags API of this hg command is unknown"))
455 455
456 456 # utilities to clear cache
457 457
458 458 def clearfilecache(repo, attrname):
459 459 unfi = repo.unfiltered()
460 460 if attrname in vars(unfi):
461 461 delattr(unfi, attrname)
462 462 unfi._filecache.pop(attrname, None)
463 463
464 464 # perf commands
465 465
466 466 @command(b'perfwalk', formatteropts)
467 467 def perfwalk(ui, repo, *pats, **opts):
468 468 opts = _byteskwargs(opts)
469 469 timer, fm = gettimer(ui, opts)
470 470 m = scmutil.match(repo[None], pats, {})
471 471 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
472 472 ignored=False))))
473 473 fm.end()
474 474
475 475 @command(b'perfannotate', formatteropts)
476 476 def perfannotate(ui, repo, f, **opts):
477 477 opts = _byteskwargs(opts)
478 478 timer, fm = gettimer(ui, opts)
479 479 fc = repo[b'.'][f]
480 480 timer(lambda: len(fc.annotate(True)))
481 481 fm.end()
482 482
483 483 @command(b'perfstatus',
484 484 [(b'u', b'unknown', False,
485 485 b'ask status to look for unknown files')] + formatteropts)
486 486 def perfstatus(ui, repo, **opts):
487 487 opts = _byteskwargs(opts)
488 488 #m = match.always(repo.root, repo.getcwd())
489 489 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
490 490 # False))))
491 491 timer, fm = gettimer(ui, opts)
492 492 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
493 493 fm.end()
494 494
495 495 @command(b'perfaddremove', formatteropts)
496 496 def perfaddremove(ui, repo, **opts):
497 497 opts = _byteskwargs(opts)
498 498 timer, fm = gettimer(ui, opts)
499 499 try:
500 500 oldquiet = repo.ui.quiet
501 501 repo.ui.quiet = True
502 502 matcher = scmutil.match(repo[None])
503 503 opts[b'dry_run'] = True
504 504 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
505 505 finally:
506 506 repo.ui.quiet = oldquiet
507 507 fm.end()
508 508
509 509 def clearcaches(cl):
510 510 # behave somewhat consistently across internal API changes
511 511 if util.safehasattr(cl, b'clearcaches'):
512 512 cl.clearcaches()
513 513 elif util.safehasattr(cl, b'_nodecache'):
514 514 from mercurial.node import nullid, nullrev
515 515 cl._nodecache = {nullid: nullrev}
516 516 cl._nodepos = None
517 517
518 518 @command(b'perfheads', formatteropts)
519 519 def perfheads(ui, repo, **opts):
520 520 opts = _byteskwargs(opts)
521 521 timer, fm = gettimer(ui, opts)
522 522 cl = repo.changelog
523 523 def d():
524 524 len(cl.headrevs())
525 525 clearcaches(cl)
526 526 timer(d)
527 527 fm.end()
528 528
529 529 @command(b'perftags', formatteropts)
530 530 def perftags(ui, repo, **opts):
531 531 import mercurial.changelog
532 532 import mercurial.manifest
533 533
534 534 opts = _byteskwargs(opts)
535 535 timer, fm = gettimer(ui, opts)
536 536 svfs = getsvfs(repo)
537 537 repocleartagscache = repocleartagscachefunc(repo)
538 538 def t():
539 539 repo.changelog = mercurial.changelog.changelog(svfs)
540 540 rootmanifest = mercurial.manifest.manifestrevlog(svfs)
541 541 repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo,
542 542 rootmanifest)
543 543 repocleartagscache()
544 544 return len(repo.tags())
545 545 timer(t)
546 546 fm.end()
547 547
548 548 @command(b'perfancestors', formatteropts)
549 549 def perfancestors(ui, repo, **opts):
550 550 opts = _byteskwargs(opts)
551 551 timer, fm = gettimer(ui, opts)
552 552 heads = repo.changelog.headrevs()
553 553 def d():
554 554 for a in repo.changelog.ancestors(heads):
555 555 pass
556 556 timer(d)
557 557 fm.end()
558 558
559 559 @command(b'perfancestorset', formatteropts)
560 560 def perfancestorset(ui, repo, revset, **opts):
561 561 opts = _byteskwargs(opts)
562 562 timer, fm = gettimer(ui, opts)
563 563 revs = repo.revs(revset)
564 564 heads = repo.changelog.headrevs()
565 565 def d():
566 566 s = repo.changelog.ancestors(heads)
567 567 for rev in revs:
568 568 rev in s
569 569 timer(d)
570 570 fm.end()
571 571
572 572 @command(b'perfbookmarks', formatteropts)
573 573 def perfbookmarks(ui, repo, **opts):
574 574 """benchmark parsing bookmarks from disk to memory"""
575 575 opts = _byteskwargs(opts)
576 576 timer, fm = gettimer(ui, opts)
577 577 def d():
578 578 clearfilecache(repo, b'_bookmarks')
579 579 repo._bookmarks
580 580 timer(d)
581 581 fm.end()
582 582
583 583 @command(b'perfbundleread', formatteropts, b'BUNDLE')
584 584 def perfbundleread(ui, repo, bundlepath, **opts):
585 585 """Benchmark reading of bundle files.
586 586
587 587 This command is meant to isolate the I/O part of bundle reading as
588 588 much as possible.
589 589 """
590 590 from mercurial import (
591 591 bundle2,
592 592 exchange,
593 593 streamclone,
594 594 )
595 595
596 596 opts = _byteskwargs(opts)
597 597
598 598 def makebench(fn):
599 599 def run():
600 600 with open(bundlepath, b'rb') as fh:
601 601 bundle = exchange.readbundle(ui, fh, bundlepath)
602 602 fn(bundle)
603 603
604 604 return run
605 605
606 606 def makereadnbytes(size):
607 607 def run():
608 608 with open(bundlepath, b'rb') as fh:
609 609 bundle = exchange.readbundle(ui, fh, bundlepath)
610 610 while bundle.read(size):
611 611 pass
612 612
613 613 return run
614 614
615 615 def makestdioread(size):
616 616 def run():
617 617 with open(bundlepath, b'rb') as fh:
618 618 while fh.read(size):
619 619 pass
620 620
621 621 return run
622 622
623 623 # bundle1
624 624
625 625 def deltaiter(bundle):
626 626 for delta in bundle.deltaiter():
627 627 pass
628 628
629 629 def iterchunks(bundle):
630 630 for chunk in bundle.getchunks():
631 631 pass
632 632
633 633 # bundle2
634 634
635 635 def forwardchunks(bundle):
636 636 for chunk in bundle._forwardchunks():
637 637 pass
638 638
639 639 def iterparts(bundle):
640 640 for part in bundle.iterparts():
641 641 pass
642 642
643 643 def iterpartsseekable(bundle):
644 644 for part in bundle.iterparts(seekable=True):
645 645 pass
646 646
647 647 def seek(bundle):
648 648 for part in bundle.iterparts(seekable=True):
649 649 part.seek(0, os.SEEK_END)
650 650
651 651 def makepartreadnbytes(size):
652 652 def run():
653 653 with open(bundlepath, b'rb') as fh:
654 654 bundle = exchange.readbundle(ui, fh, bundlepath)
655 655 for part in bundle.iterparts():
656 656 while part.read(size):
657 657 pass
658 658
659 659 return run
660 660
661 661 benches = [
662 662 (makestdioread(8192), b'read(8k)'),
663 663 (makestdioread(16384), b'read(16k)'),
664 664 (makestdioread(32768), b'read(32k)'),
665 665 (makestdioread(131072), b'read(128k)'),
666 666 ]
667 667
668 668 with open(bundlepath, b'rb') as fh:
669 669 bundle = exchange.readbundle(ui, fh, bundlepath)
670 670
671 671 if isinstance(bundle, changegroup.cg1unpacker):
672 672 benches.extend([
673 673 (makebench(deltaiter), b'cg1 deltaiter()'),
674 674 (makebench(iterchunks), b'cg1 getchunks()'),
675 675 (makereadnbytes(8192), b'cg1 read(8k)'),
676 676 (makereadnbytes(16384), b'cg1 read(16k)'),
677 677 (makereadnbytes(32768), b'cg1 read(32k)'),
678 678 (makereadnbytes(131072), b'cg1 read(128k)'),
679 679 ])
680 680 elif isinstance(bundle, bundle2.unbundle20):
681 681 benches.extend([
682 682 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
683 683 (makebench(iterparts), b'bundle2 iterparts()'),
684 684 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
685 685 (makebench(seek), b'bundle2 part seek()'),
686 686 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
687 687 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
688 688 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
689 689 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
690 690 ])
691 691 elif isinstance(bundle, streamclone.streamcloneapplier):
692 692 raise error.Abort(b'stream clone bundles not supported')
693 693 else:
694 694 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
695 695
696 696 for fn, title in benches:
697 697 timer, fm = gettimer(ui, opts)
698 698 timer(fn, title=title)
699 699 fm.end()
700 700
701 701 @command(b'perfchangegroupchangelog', formatteropts +
702 702 [(b'', b'version', b'02', b'changegroup version'),
703 703 (b'r', b'rev', b'', b'revisions to add to changegroup')])
704 704 def perfchangegroupchangelog(ui, repo, version=b'02', rev=None, **opts):
705 705 """Benchmark producing a changelog group for a changegroup.
706 706
707 707 This measures the time spent processing the changelog during a
708 708 bundle operation. This occurs during `hg bundle` and on a server
709 709 processing a `getbundle` wire protocol request (handles clones
710 710 and pull requests).
711 711
712 712 By default, all revisions are added to the changegroup.
713 713 """
714 714 opts = _byteskwargs(opts)
715 715 cl = repo.changelog
716 716 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
717 717 bundler = changegroup.getbundler(version, repo)
718 718
719 719 def d():
720 720 state, chunks = bundler._generatechangelog(cl, nodes)
721 721 for chunk in chunks:
722 722 pass
723 723
724 724 timer, fm = gettimer(ui, opts)
725 725
726 726 # Terminal printing can interfere with timing. So disable it.
727 727 with ui.configoverride({(b'progress', b'disable'): True}):
728 728 timer(d)
729 729
730 730 fm.end()
731 731
732 732 @command(b'perfdirs', formatteropts)
733 733 def perfdirs(ui, repo, **opts):
734 734 opts = _byteskwargs(opts)
735 735 timer, fm = gettimer(ui, opts)
736 736 dirstate = repo.dirstate
737 737 b'a' in dirstate
738 738 def d():
739 739 dirstate.hasdir(b'a')
740 740 del dirstate._map._dirs
741 741 timer(d)
742 742 fm.end()
743 743
744 744 @command(b'perfdirstate', formatteropts)
745 745 def perfdirstate(ui, repo, **opts):
746 746 opts = _byteskwargs(opts)
747 747 timer, fm = gettimer(ui, opts)
748 748 b"a" in repo.dirstate
749 749 def d():
750 750 repo.dirstate.invalidate()
751 751 b"a" in repo.dirstate
752 752 timer(d)
753 753 fm.end()
754 754
755 755 @command(b'perfdirstatedirs', formatteropts)
756 756 def perfdirstatedirs(ui, repo, **opts):
757 757 opts = _byteskwargs(opts)
758 758 timer, fm = gettimer(ui, opts)
759 759 b"a" in repo.dirstate
760 760 def d():
761 761 repo.dirstate.hasdir(b"a")
762 762 del repo.dirstate._map._dirs
763 763 timer(d)
764 764 fm.end()
765 765
766 766 @command(b'perfdirstatefoldmap', formatteropts)
767 767 def perfdirstatefoldmap(ui, repo, **opts):
768 768 opts = _byteskwargs(opts)
769 769 timer, fm = gettimer(ui, opts)
770 770 dirstate = repo.dirstate
771 771 b'a' in dirstate
772 772 def d():
773 773 dirstate._map.filefoldmap.get(b'a')
774 774 del dirstate._map.filefoldmap
775 775 timer(d)
776 776 fm.end()
777 777
778 778 @command(b'perfdirfoldmap', formatteropts)
779 779 def perfdirfoldmap(ui, repo, **opts):
780 780 opts = _byteskwargs(opts)
781 781 timer, fm = gettimer(ui, opts)
782 782 dirstate = repo.dirstate
783 783 b'a' in dirstate
784 784 def d():
785 785 dirstate._map.dirfoldmap.get(b'a')
786 786 del dirstate._map.dirfoldmap
787 787 del dirstate._map._dirs
788 788 timer(d)
789 789 fm.end()
790 790
791 791 @command(b'perfdirstatewrite', formatteropts)
792 792 def perfdirstatewrite(ui, repo, **opts):
793 793 opts = _byteskwargs(opts)
794 794 timer, fm = gettimer(ui, opts)
795 795 ds = repo.dirstate
796 796 b"a" in ds
797 797 def d():
798 798 ds._dirty = True
799 799 ds.write(repo.currenttransaction())
800 800 timer(d)
801 801 fm.end()
802 802
803 803 @command(b'perfmergecalculate',
804 804 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
805 805 def perfmergecalculate(ui, repo, rev, **opts):
806 806 opts = _byteskwargs(opts)
807 807 timer, fm = gettimer(ui, opts)
808 808 wctx = repo[None]
809 809 rctx = scmutil.revsingle(repo, rev, rev)
810 810 ancestor = wctx.ancestor(rctx)
811 811 # we don't want working dir files to be stat'd in the benchmark, so prime
812 812 # that cache
813 813 wctx.dirty()
814 814 def d():
815 815 # acceptremote is True because we don't want prompts in the middle of
816 816 # our benchmark
817 817 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
818 818 acceptremote=True, followcopies=True)
819 819 timer(d)
820 820 fm.end()
821 821
822 822 @command(b'perfpathcopies', [], b"REV REV")
823 823 def perfpathcopies(ui, repo, rev1, rev2, **opts):
824 824 opts = _byteskwargs(opts)
825 825 timer, fm = gettimer(ui, opts)
826 826 ctx1 = scmutil.revsingle(repo, rev1, rev1)
827 827 ctx2 = scmutil.revsingle(repo, rev2, rev2)
828 828 def d():
829 829 copies.pathcopies(ctx1, ctx2)
830 830 timer(d)
831 831 fm.end()
832 832
833 833 @command(b'perfphases',
834 834 [(b'', b'full', False, b'include file reading time too'),
835 835 ], b"")
836 836 def perfphases(ui, repo, **opts):
837 837 """benchmark phasesets computation"""
838 838 opts = _byteskwargs(opts)
839 839 timer, fm = gettimer(ui, opts)
840 840 _phases = repo._phasecache
841 841 full = opts.get(b'full')
842 842 def d():
843 843 phases = _phases
844 844 if full:
845 845 clearfilecache(repo, b'_phasecache')
846 846 phases = repo._phasecache
847 847 phases.invalidate()
848 848 phases.loadphaserevs(repo)
849 849 timer(d)
850 850 fm.end()
851 851
852 852 @command(b'perfphasesremote',
853 853 [], b"[DEST]")
854 854 def perfphasesremote(ui, repo, dest=None, **opts):
855 855 """benchmark time needed to analyse phases of the remote server"""
856 856 from mercurial.node import (
857 857 bin,
858 858 )
859 859 from mercurial import (
860 860 exchange,
861 861 hg,
862 862 phases,
863 863 )
864 864 opts = _byteskwargs(opts)
865 865 timer, fm = gettimer(ui, opts)
866 866
867 867 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
868 868 if not path:
869 869 raise error.Abort((b'default repository not configured!'),
870 870 hint=(b"see 'hg help config.paths'"))
871 871 dest = path.pushloc or path.loc
872 872 branches = (path.branch, opts.get(b'branch') or [])
873 873 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
874 874 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
875 875 other = hg.peer(repo, opts, dest)
876 876
877 877 # easier to perform discovery through the operation
878 878 op = exchange.pushoperation(repo, other)
879 879 exchange._pushdiscoverychangeset(op)
880 880
881 881 remotesubset = op.fallbackheads
882 882
883 883 with other.commandexecutor() as e:
884 884 remotephases = e.callcommand(b'listkeys',
885 885 {b'namespace': b'phases'}).result()
886 886 del other
887 887 publishing = remotephases.get(b'publishing', False)
888 888 if publishing:
889 889 ui.status((b'publishing: yes\n'))
890 890 else:
891 891 ui.status((b'publishing: no\n'))
892 892
893 893 nodemap = repo.changelog.nodemap
894 894 nonpublishroots = 0
895 895 for nhex, phase in remotephases.iteritems():
896 896 if nhex == b'publishing': # ignore data related to publish option
897 897 continue
898 898 node = bin(nhex)
899 899 if node in nodemap and int(phase):
900 900 nonpublishroots += 1
901 901 ui.status((b'number of roots: %d\n') % len(remotephases))
902 902 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
903 903 def d():
904 904 phases.remotephasessummary(repo,
905 905 remotesubset,
906 906 remotephases)
907 907 timer(d)
908 908 fm.end()
909 909
910 910 @command(b'perfmanifest',[
911 911 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
912 912 (b'', b'clear-disk', False, b'clear on-disk caches too'),
913 913 ] + formatteropts, b'REV|NODE')
914 914 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
915 915 """benchmark the time to read a manifest from disk and return a usable
916 916 dict-like object
917 917
918 918 Manifest caches are cleared before retrieval."""
919 919 opts = _byteskwargs(opts)
920 920 timer, fm = gettimer(ui, opts)
921 921 if not manifest_rev:
922 922 ctx = scmutil.revsingle(repo, rev, rev)
923 923 t = ctx.manifestnode()
924 924 else:
925 925 from mercurial.node import bin
926 926
927 927 if len(rev) == 40:
928 928 t = bin(rev)
929 929 else:
930 930 try:
931 931 rev = int(rev)
932 932
933 933 if util.safehasattr(repo.manifestlog, b'getstorage'):
934 934 t = repo.manifestlog.getstorage(b'').node(rev)
935 935 else:
936 936 t = repo.manifestlog._revlog.lookup(rev)
937 937 except ValueError:
938 938 raise error.Abort(b'manifest revision must be integer or full '
939 939 b'node')
940 940 def d():
941 941 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
942 942 repo.manifestlog[t].read()
943 943 timer(d)
944 944 fm.end()
945 945
946 946 @command(b'perfchangeset', formatteropts)
947 947 def perfchangeset(ui, repo, rev, **opts):
948 948 opts = _byteskwargs(opts)
949 949 timer, fm = gettimer(ui, opts)
950 950 n = scmutil.revsingle(repo, rev).node()
951 951 def d():
952 952 repo.changelog.read(n)
953 953 #repo.changelog._cache = None
954 954 timer(d)
955 955 fm.end()
956 956
957 957 @command(b'perfindex', formatteropts)
958 958 def perfindex(ui, repo, **opts):
959 959 import mercurial.revlog
960 960 opts = _byteskwargs(opts)
961 961 timer, fm = gettimer(ui, opts)
962 962 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
963 963 n = repo[b"tip"].node()
964 964 svfs = getsvfs(repo)
965 965 def d():
966 966 cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
967 967 cl.rev(n)
968 968 timer(d)
969 969 fm.end()
970 970
971 971 @command(b'perfstartup', formatteropts)
972 972 def perfstartup(ui, repo, **opts):
973 973 opts = _byteskwargs(opts)
974 974 timer, fm = gettimer(ui, opts)
975 975 def d():
976 976 if os.name != r'nt':
977 977 os.system(b"HGRCPATH= %s version -q > /dev/null" %
978 978 fsencode(sys.argv[0]))
979 979 else:
980 980 os.environ[r'HGRCPATH'] = r' '
981 981 os.system(r"%s version -q > NUL" % sys.argv[0])
982 982 timer(d)
983 983 fm.end()
984 984
985 985 @command(b'perfparents', formatteropts)
986 986 def perfparents(ui, repo, **opts):
987 987 opts = _byteskwargs(opts)
988 988 timer, fm = gettimer(ui, opts)
989 989 # control the number of commits perfparents iterates over
990 990 # experimental config: perf.parentscount
991 991 count = getint(ui, b"perf", b"parentscount", 1000)
992 992 if len(repo.changelog) < count:
993 993 raise error.Abort(b"repo needs %d commits for this test" % count)
994 994 repo = repo.unfiltered()
995 995 nl = [repo.changelog.node(i) for i in _xrange(count)]
996 996 def d():
997 997 for n in nl:
998 998 repo.changelog.parents(n)
999 999 timer(d)
1000 1000 fm.end()
1001 1001
1002 1002 @command(b'perfctxfiles', formatteropts)
1003 1003 def perfctxfiles(ui, repo, x, **opts):
1004 1004 opts = _byteskwargs(opts)
1005 1005 x = int(x)
1006 1006 timer, fm = gettimer(ui, opts)
1007 1007 def d():
1008 1008 len(repo[x].files())
1009 1009 timer(d)
1010 1010 fm.end()
1011 1011
1012 1012 @command(b'perfrawfiles', formatteropts)
1013 1013 def perfrawfiles(ui, repo, x, **opts):
1014 1014 opts = _byteskwargs(opts)
1015 1015 x = int(x)
1016 1016 timer, fm = gettimer(ui, opts)
1017 1017 cl = repo.changelog
1018 1018 def d():
1019 1019 len(cl.read(x)[3])
1020 1020 timer(d)
1021 1021 fm.end()
1022 1022
1023 1023 @command(b'perflookup', formatteropts)
1024 1024 def perflookup(ui, repo, rev, **opts):
1025 1025 opts = _byteskwargs(opts)
1026 1026 timer, fm = gettimer(ui, opts)
1027 1027 timer(lambda: len(repo.lookup(rev)))
1028 1028 fm.end()
1029 1029
1030 1030 @command(b'perflinelogedits',
1031 1031 [(b'n', b'edits', 10000, b'number of edits'),
1032 1032 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1033 1033 ], norepo=True)
1034 1034 def perflinelogedits(ui, **opts):
1035 1035 from mercurial import linelog
1036 1036
1037 1037 opts = _byteskwargs(opts)
1038 1038
1039 1039 edits = opts[b'edits']
1040 1040 maxhunklines = opts[b'max_hunk_lines']
1041 1041
1042 1042 maxb1 = 100000
1043 1043 random.seed(0)
1044 1044 randint = random.randint
1045 1045 currentlines = 0
1046 1046 arglist = []
1047 1047 for rev in _xrange(edits):
1048 1048 a1 = randint(0, currentlines)
1049 1049 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1050 1050 b1 = randint(0, maxb1)
1051 1051 b2 = randint(b1, b1 + maxhunklines)
1052 1052 currentlines += (b2 - b1) - (a2 - a1)
1053 1053 arglist.append((rev, a1, a2, b1, b2))
1054 1054
1055 1055 def d():
1056 1056 ll = linelog.linelog()
1057 1057 for args in arglist:
1058 1058 ll.replacelines(*args)
1059 1059
1060 1060 timer, fm = gettimer(ui, opts)
1061 1061 timer(d)
1062 1062 fm.end()
1063 1063
1064 1064 @command(b'perfrevrange', formatteropts)
1065 1065 def perfrevrange(ui, repo, *specs, **opts):
1066 1066 opts = _byteskwargs(opts)
1067 1067 timer, fm = gettimer(ui, opts)
1068 1068 revrange = scmutil.revrange
1069 1069 timer(lambda: len(revrange(repo, specs)))
1070 1070 fm.end()
1071 1071
1072 1072 @command(b'perfnodelookup', formatteropts)
1073 1073 def perfnodelookup(ui, repo, rev, **opts):
1074 1074 opts = _byteskwargs(opts)
1075 1075 timer, fm = gettimer(ui, opts)
1076 1076 import mercurial.revlog
1077 1077 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1078 1078 n = scmutil.revsingle(repo, rev).node()
1079 1079 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1080 1080 def d():
1081 1081 cl.rev(n)
1082 1082 clearcaches(cl)
1083 1083 timer(d)
1084 1084 fm.end()
1085 1085
1086 1086 @command(b'perflog',
1087 1087 [(b'', b'rename', False, b'ask log to follow renames')
1088 1088 ] + formatteropts)
1089 1089 def perflog(ui, repo, rev=None, **opts):
1090 1090 opts = _byteskwargs(opts)
1091 1091 if rev is None:
1092 1092 rev=[]
1093 1093 timer, fm = gettimer(ui, opts)
1094 1094 ui.pushbuffer()
1095 1095 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1096 1096 copies=opts.get(b'rename')))
1097 1097 ui.popbuffer()
1098 1098 fm.end()
1099 1099
1100 1100 @command(b'perfmoonwalk', formatteropts)
1101 1101 def perfmoonwalk(ui, repo, **opts):
1102 1102 """benchmark walking the changelog backwards
1103 1103
1104 1104 This also loads the changelog data for each revision in the changelog.
1105 1105 """
1106 1106 opts = _byteskwargs(opts)
1107 1107 timer, fm = gettimer(ui, opts)
1108 1108 def moonwalk():
1109 1109 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1110 1110 ctx = repo[i]
1111 1111 ctx.branch() # read changelog data (in addition to the index)
1112 1112 timer(moonwalk)
1113 1113 fm.end()
1114 1114
1115 1115 @command(b'perftemplating',
1116 1116 [(b'r', b'rev', [], b'revisions to run the template on'),
1117 1117 ] + formatteropts)
1118 1118 def perftemplating(ui, repo, testedtemplate=None, **opts):
1119 1119 """test the rendering time of a given template"""
1120 1120 if makelogtemplater is None:
1121 1121 raise error.Abort((b"perftemplating not available with this Mercurial"),
1122 1122 hint=b"use 4.3 or later")
1123 1123
1124 1124 opts = _byteskwargs(opts)
1125 1125
1126 1126 nullui = ui.copy()
1127 1127 nullui.fout = open(os.devnull, r'wb')
1128 1128 nullui.disablepager()
1129 1129 revs = opts.get(b'rev')
1130 1130 if not revs:
1131 1131 revs = [b'all()']
1132 1132 revs = list(scmutil.revrange(repo, revs))
1133 1133
1134 1134 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1135 1135 b' {author|person}: {desc|firstline}\n')
1136 1136 if testedtemplate is None:
1137 1137 testedtemplate = defaulttemplate
1138 1138 displayer = makelogtemplater(nullui, repo, testedtemplate)
1139 1139 def format():
1140 1140 for r in revs:
1141 1141 ctx = repo[r]
1142 1142 displayer.show(ctx)
1143 1143 displayer.flush(ctx)
1144 1144
1145 1145 timer, fm = gettimer(ui, opts)
1146 1146 timer(format)
1147 1147 fm.end()
1148 1148
1149 1149 @command(b'perfcca', formatteropts)
1150 1150 def perfcca(ui, repo, **opts):
1151 1151 opts = _byteskwargs(opts)
1152 1152 timer, fm = gettimer(ui, opts)
1153 1153 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1154 1154 fm.end()
1155 1155
1156 1156 @command(b'perffncacheload', formatteropts)
1157 1157 def perffncacheload(ui, repo, **opts):
1158 1158 opts = _byteskwargs(opts)
1159 1159 timer, fm = gettimer(ui, opts)
1160 1160 s = repo.store
1161 1161 def d():
1162 1162 s.fncache._load()
1163 1163 timer(d)
1164 1164 fm.end()
1165 1165
1166 1166 @command(b'perffncachewrite', formatteropts)
1167 1167 def perffncachewrite(ui, repo, **opts):
1168 1168 opts = _byteskwargs(opts)
1169 1169 timer, fm = gettimer(ui, opts)
1170 1170 s = repo.store
1171 1171 lock = repo.lock()
1172 1172 s.fncache._load()
1173 1173 tr = repo.transaction(b'perffncachewrite')
1174 1174 tr.addbackup(b'fncache')
1175 1175 def d():
1176 1176 s.fncache._dirty = True
1177 1177 s.fncache.write(tr)
1178 1178 timer(d)
1179 1179 tr.close()
1180 1180 lock.release()
1181 1181 fm.end()
1182 1182
1183 1183 @command(b'perffncacheencode', formatteropts)
1184 1184 def perffncacheencode(ui, repo, **opts):
1185 1185 opts = _byteskwargs(opts)
1186 1186 timer, fm = gettimer(ui, opts)
1187 1187 s = repo.store
1188 1188 s.fncache._load()
1189 1189 def d():
1190 1190 for p in s.fncache.entries:
1191 1191 s.encode(p)
1192 1192 timer(d)
1193 1193 fm.end()
1194 1194
1195 1195 def _bdiffworker(q, blocks, xdiff, ready, done):
1196 1196 while not done.is_set():
1197 1197 pair = q.get()
1198 1198 while pair is not None:
1199 1199 if xdiff:
1200 1200 mdiff.bdiff.xdiffblocks(*pair)
1201 1201 elif blocks:
1202 1202 mdiff.bdiff.blocks(*pair)
1203 1203 else:
1204 1204 mdiff.textdiff(*pair)
1205 1205 q.task_done()
1206 1206 pair = q.get()
1207 1207 q.task_done() # for the None one
1208 1208 with ready:
1209 1209 ready.wait()
1210 1210
1211 1211 def _manifestrevision(repo, mnode):
1212 1212 ml = repo.manifestlog
1213 1213
1214 1214 if util.safehasattr(ml, b'getstorage'):
1215 1215 store = ml.getstorage(b'')
1216 1216 else:
1217 1217 store = ml._revlog
1218 1218
1219 1219 return store.revision(mnode)
1220 1220
1221 1221 @command(b'perfbdiff', revlogopts + formatteropts + [
1222 1222 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1223 1223 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1224 1224 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
1225 1225 (b'', b'blocks', False, b'test computing diffs into blocks'),
1226 1226 (b'', b'xdiff', False, b'use xdiff algorithm'),
1227 1227 ],
1228 1228
1229 1229 b'-c|-m|FILE REV')
1230 1230 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1231 1231 """benchmark a bdiff between revisions
1232 1232
1233 1233 By default, benchmark a bdiff between its delta parent and itself.
1234 1234
1235 1235 With ``--count``, benchmark bdiffs between delta parents and self for N
1236 1236 revisions starting at the specified revision.
1237 1237
1238 1238 With ``--alldata``, assume the requested revision is a changeset and
1239 1239 measure bdiffs for all changes related to that changeset (manifest
1240 1240 and filelogs).
1241 1241 """
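# Example invocations (illustrative; the perf extension must be enabled):
#   hg perfbdiff -c 1000                  # changelog rev 1000 vs its delta parent
#   hg perfbdiff --alldata --count 10 100 # all data touched by revs 100..109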
1242 1242 opts = _byteskwargs(opts)
1243 1243
1244 1244 if opts[b'xdiff'] and not opts[b'blocks']:
1245 1245 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1246 1246
1247 1247 if opts[b'alldata']:
1248 1248 opts[b'changelog'] = True
1249 1249
1250 1250 if opts.get(b'changelog') or opts.get(b'manifest'):
1251 1251 file_, rev = None, file_
1252 1252 elif rev is None:
1253 1253 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1254 1254
1255 1255 blocks = opts[b'blocks']
1256 1256 xdiff = opts[b'xdiff']
1257 1257 textpairs = []
1258 1258
1259 1259 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1260 1260
1261 1261 startrev = r.rev(r.lookup(rev))
1262 1262 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1263 1263 if opts[b'alldata']:
1264 1264 # Load revisions associated with changeset.
1265 1265 ctx = repo[rev]
1266 1266 mtext = _manifestrevision(repo, ctx.manifestnode())
1267 1267 for pctx in ctx.parents():
1268 1268 pman = _manifestrevision(repo, pctx.manifestnode())
1269 1269 textpairs.append((pman, mtext))
1270 1270
1271 1271 # Load filelog revisions by iterating manifest delta.
1272 1272 man = ctx.manifest()
1273 1273 pman = ctx.p1().manifest()
1274 1274 for filename, change in pman.diff(man).items():
1275 1275 fctx = repo.file(filename)
1276 1276 f1 = fctx.revision(change[0][0] or -1)
1277 1277 f2 = fctx.revision(change[1][0] or -1)
1278 1278 textpairs.append((f1, f2))
1279 1279 else:
1280 1280 dp = r.deltaparent(rev)
1281 1281 textpairs.append((r.revision(dp), r.revision(rev)))
1282 1282
1283 1283 withthreads = threads > 0
1284 1284 if not withthreads:
1285 1285 def d():
1286 1286 for pair in textpairs:
1287 1287 if xdiff:
1288 1288 mdiff.bdiff.xdiffblocks(*pair)
1289 1289 elif blocks:
1290 1290 mdiff.bdiff.blocks(*pair)
1291 1291 else:
1292 1292 mdiff.textdiff(*pair)
1293 1293 else:
1294 1294 q = queue()
1295 1295 for i in _xrange(threads):
1296 1296 q.put(None)
1297 1297 ready = threading.Condition()
1298 1298 done = threading.Event()
1299 1299 for i in _xrange(threads):
1300 1300 threading.Thread(target=_bdiffworker,
1301 1301 args=(q, blocks, xdiff, ready, done)).start()
1302 1302 q.join()
1303 1303 def d():
1304 1304 for pair in textpairs:
1305 1305 q.put(pair)
1306 1306 for i in _xrange(threads):
1307 1307 q.put(None)
1308 1308 with ready:
1309 1309 ready.notify_all()
1310 1310 q.join()
1311 1311 timer, fm = gettimer(ui, opts)
1312 1312 timer(d)
1313 1313 fm.end()
1314 1314
1315 1315 if withthreads:
1316 1316 done.set()
1317 1317 for i in _xrange(threads):
1318 1318 q.put(None)
1319 1319 with ready:
1320 1320 ready.notify_all()
1321 1321
1322 1322 @command(b'perfunidiff', revlogopts + formatteropts + [
1323 1323 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1324 1324 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1325 1325 ], b'-c|-m|FILE REV')
1326 1326 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1327 1327 """benchmark a unified diff between revisions
1328 1328
1329 1329 This doesn't include any copy tracing - it's just a unified diff
1330 1330 of the texts.
1331 1331
1332 1332 By default, benchmark a diff between its delta parent and itself.
1333 1333
1334 1334 With ``--count``, benchmark diffs between delta parents and self for N
1335 1335 revisions starting at the specified revision.
1336 1336
1337 1337 With ``--alldata``, assume the requested revision is a changeset and
1338 1338 measure diffs for all changes related to that changeset (manifest
1339 1339 and filelogs).
1340 1340 """
1341 1341 opts = _byteskwargs(opts)
1342 1342 if opts[b'alldata']:
1343 1343 opts[b'changelog'] = True
1344 1344
1345 1345 if opts.get(b'changelog') or opts.get(b'manifest'):
1346 1346 file_, rev = None, file_
1347 1347 elif rev is None:
1348 1348 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1349 1349
1350 1350 textpairs = []
1351 1351
1352 1352 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1353 1353
1354 1354 startrev = r.rev(r.lookup(rev))
1355 1355 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1356 1356 if opts[b'alldata']:
1357 1357 # Load revisions associated with changeset.
1358 1358 ctx = repo[rev]
1359 1359 mtext = _manifestrevision(repo, ctx.manifestnode())
1360 1360 for pctx in ctx.parents():
1361 1361 pman = _manifestrevision(repo, pctx.manifestnode())
1362 1362 textpairs.append((pman, mtext))
1363 1363
1364 1364 # Load filelog revisions by iterating manifest delta.
1365 1365 man = ctx.manifest()
1366 1366 pman = ctx.p1().manifest()
1367 1367 for filename, change in pman.diff(man).items():
1368 1368 fctx = repo.file(filename)
1369 1369 f1 = fctx.revision(change[0][0] or -1)
1370 1370 f2 = fctx.revision(change[1][0] or -1)
1371 1371 textpairs.append((f1, f2))
1372 1372 else:
1373 1373 dp = r.deltaparent(rev)
1374 1374 textpairs.append((r.revision(dp), r.revision(rev)))
1375 1375
1376 1376 def d():
1377 1377 for left, right in textpairs:
1378 1378 # The date strings don't matter, so we pass empty strings.
1379 1379 headerlines, hunks = mdiff.unidiff(
1380 1380 left, b'', right, b'', b'left', b'right', binary=False)
1381 1381 # consume iterators in roughly the way patch.py does
1382 1382 b'\n'.join(headerlines)
1383 1383 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1384 1384 timer, fm = gettimer(ui, opts)
1385 1385 timer(d)
1386 1386 fm.end()
1387 1387
1388 1388 @command(b'perfdiffwd', formatteropts)
1389 1389 def perfdiffwd(ui, repo, **opts):
1390 1390 """Profile diff of working directory changes"""
1391 1391 opts = _byteskwargs(opts)
1392 1392 timer, fm = gettimer(ui, opts)
1393 1393 options = {
1394 1394 'w': 'ignore_all_space',
1395 1395 'b': 'ignore_space_change',
1396 1396 'B': 'ignore_blank_lines',
1397 1397 }
1398 1398
1399 1399 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1400 1400 opts = dict((options[c], b'1') for c in diffopt)
1401 1401 def d():
1402 1402 ui.pushbuffer()
1403 1403 commands.diff(ui, repo, **opts)
1404 1404 ui.popbuffer()
1405 1405 diffopt = diffopt.encode('ascii')
1406 1406 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1407 1407 timer(d, title)
1408 1408 fm.end()
1409 1409
1410 1410 @command(b'perfrevlogindex', revlogopts + formatteropts,
1411 1411 b'-c|-m|FILE')
1412 1412 def perfrevlogindex(ui, repo, file_=None, **opts):
1413 1413 """Benchmark operations against a revlog index.
1414 1414
1415 1415 This tests constructing a revlog instance, reading index data,
1416 1416 parsing index data, and performing various operations related to
1417 1417 index data.
1418 1418 """
1419 1419
1420 1420 opts = _byteskwargs(opts)
1421 1421
1422 1422 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1423 1423
1424 1424 opener = getattr(rl, 'opener') # trick linter
1425 1425 indexfile = rl.indexfile
1426 1426 data = opener.read(indexfile)
1427 1427
1428 1428 header = struct.unpack(b'>I', data[0:4])[0]
1429 1429 version = header & 0xFFFF
1430 1430 if version == 1:
1431 1431 revlogio = revlog.revlogio()
1432 1432 inline = header & (1 << 16)
1433 1433 else:
1434 1434 raise error.Abort((b'unsupported revlog version: %d') % version)
1435 1435
1436 1436 rllen = len(rl)
1437 1437
1438 1438 node0 = rl.node(0)
1439 1439 node25 = rl.node(rllen // 4)
1440 1440 node50 = rl.node(rllen // 2)
1441 1441 node75 = rl.node(rllen // 4 * 3)
1442 1442 node100 = rl.node(rllen - 1)
1443 1443
1444 1444 allrevs = range(rllen)
1445 1445 allrevsrev = list(reversed(allrevs))
1446 1446 allnodes = [rl.node(rev) for rev in range(rllen)]
1447 1447 allnodesrev = list(reversed(allnodes))
1448 1448
1449 1449 def constructor():
1450 1450 revlog.revlog(opener, indexfile)
1451 1451
1452 1452 def read():
1453 1453 with opener(indexfile) as fh:
1454 1454 fh.read()
1455 1455
1456 1456 def parseindex():
1457 1457 revlogio.parseindex(data, inline)
1458 1458
1459 1459 def getentry(revornode):
1460 1460 index = revlogio.parseindex(data, inline)[0]
1461 1461 index[revornode]
1462 1462
1463 1463 def getentries(revs, count=1):
1464 1464 index = revlogio.parseindex(data, inline)[0]
1465 1465
1466 1466 for i in range(count):
1467 1467 for rev in revs:
1468 1468 index[rev]
1469 1469
1470 1470 def resolvenode(node):
1471 1471 nodemap = revlogio.parseindex(data, inline)[1]
1472 1472 # This only works for the C code.
1473 1473 if nodemap is None:
1474 1474 return
1475 1475
1476 1476 try:
1477 1477 nodemap[node]
1478 1478 except error.RevlogError:
1479 1479 pass
1480 1480
1481 1481 def resolvenodes(nodes, count=1):
1482 1482 nodemap = revlogio.parseindex(data, inline)[1]
1483 1483 if nodemap is None:
1484 1484 return
1485 1485
1486 1486 for i in range(count):
1487 1487 for node in nodes:
1488 1488 try:
1489 1489 nodemap[node]
1490 1490 except error.RevlogError:
1491 1491 pass
1492 1492
1493 1493 benches = [
1494 1494 (constructor, b'revlog constructor'),
1495 1495 (read, b'read'),
1496 1496 (parseindex, b'create index object'),
1497 1497 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1498 1498 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1499 1499 (lambda: resolvenode(node0), b'look up node at rev 0'),
1500 1500 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1501 1501 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1502 1502 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1503 1503 (lambda: resolvenode(node100), b'look up node at tip'),
1504 1504 # 2x variation is to measure caching impact.
1505 1505 (lambda: resolvenodes(allnodes),
1506 1506 b'look up all nodes (forward)'),
1507 1507 (lambda: resolvenodes(allnodes, 2),
1508 1508 b'look up all nodes 2x (forward)'),
1509 1509 (lambda: resolvenodes(allnodesrev),
1510 1510 b'look up all nodes (reverse)'),
1511 1511 (lambda: resolvenodes(allnodesrev, 2),
1512 1512 b'look up all nodes 2x (reverse)'),
1513 1513 (lambda: getentries(allrevs),
1514 1514 b'retrieve all index entries (forward)'),
1515 1515 (lambda: getentries(allrevs, 2),
1516 1516 b'retrieve all index entries 2x (forward)'),
1517 1517 (lambda: getentries(allrevsrev),
1518 1518 b'retrieve all index entries (reverse)'),
1519 1519 (lambda: getentries(allrevsrev, 2),
1520 1520 b'retrieve all index entries 2x (reverse)'),
1521 1521 ]
1522 1522
1523 1523 for fn, title in benches:
1524 1524 timer, fm = gettimer(ui, opts)
1525 1525 timer(fn, title=title)
1526 1526 fm.end()
1527 1527
1528 1528 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1529 1529 [(b'd', b'dist', 100, b'distance between the revisions'),
1530 1530 (b's', b'startrev', 0, b'revision to start reading at'),
1531 1531 (b'', b'reverse', False, b'read in reverse')],
1532 1532 b'-c|-m|FILE')
1533 1533 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1534 1534 **opts):
1535 1535 """Benchmark reading a series of revisions from a revlog.
1536 1536
1537 1537 By default, we read every ``-d/--dist`` revision from 0 to tip of
1538 1538 the specified revlog.
1539 1539
1540 1540 The start revision can be defined via ``-s/--startrev``.
1541 1541 """
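# Example invocations (illustrative; the perf extension must be enabled):
#   hg perfrevlogrevisions -c --dist 1000            # every 1000th changelog rev
#   hg perfrevlogrevisions -m --startrev -100 --reverse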
1542 1542 opts = _byteskwargs(opts)
1543 1543
1544 1544 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1545 1545 rllen = getlen(ui)(rl)
1546 1546
1547 1547 if startrev < 0:
1548 1548 startrev = rllen + startrev
1549 1549
1550 1550 def d():
1551 1551 rl.clearcaches()
1552 1552
1553 1553 beginrev = startrev
1554 1554 endrev = rllen
1555 1555 dist = opts[b'dist']
1556 1556
1557 1557 if reverse:
1558 1558 beginrev, endrev = endrev - 1, beginrev - 1
1559 1559 dist = -1 * dist
1560 1560
1561 1561 for x in _xrange(beginrev, endrev, dist):
1562 1562 # Old revisions don't support passing int.
1563 1563 n = rl.node(x)
1564 1564 rl.revision(n)
1565 1565
1566 1566 timer, fm = gettimer(ui, opts)
1567 1567 timer(d)
1568 1568 fm.end()
1569 1569
1570 1570 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1571 1571 [(b's', b'startrev', 1000, b'revision to start writing at'),
1572 1572 (b'', b'stoprev', -1, b'last revision to write'),
1573 1573 (b'', b'count', 3, b'number of passes to perform'),
1574 1574 (b'', b'details', False, b'print timing for every revision tested'),
1575 1575 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
1576 1576 ],
1577 1577 b'-c|-m|FILE')
1578 1578 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1579 1579 """Benchmark writing a series of revisions to a revlog.
1580 1580
1581 1581 Possible source values are:
1582 1582 * `full`: add from a full text (default).
1583 1583 * `parent-1`: add from a delta to the first parent
1584 1584 * `parent-2`: add from a delta to the second parent if it exists
1585 1585 (use a delta from the first parent otherwise)
1586 1586 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1587 * `storage`: add from the existing precomputed deltas
1587 1588 """
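# Example invocations (illustrative; the perf extension must be enabled):
#   hg perfrevlogwrite -m --source full
#   hg perfrevlogwrite -m --source storage --count 10 --details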
1588 1589 opts = _byteskwargs(opts)
1589 1590
1590 1591 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1591 1592 rllen = getlen(ui)(rl)
1592 1593 if startrev < 0:
1593 1594 startrev = rllen + startrev
1594 1595 if stoprev < 0:
1595 1596 stoprev = rllen + stoprev
1596 1597
1597 1598 source = opts['source']
1598 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest')
1599 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1600 b'storage')
1599 1601 if source not in validsource:
1600 1602 raise error.Abort('invalid source type: %s' % source)
1601 1603
1602 1604 ### actually gather results
1603 1605 count = opts['count']
1604 1606 if count <= 0:
1605 1607 raise error.Abort('invalid run count: %d' % count)
1606 1608 allresults = []
1607 1609 for c in range(count):
1608 1610 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1)
1609 1611 allresults.append(timing)
1610 1612
1611 1613 ### consolidate the results in a single list
1612 1614 results = []
1613 1615 for idx, (rev, t) in enumerate(allresults[0]):
1614 1616 ts = [t]
1615 1617 for other in allresults[1:]:
1616 1618 orev, ot = other[idx]
1617 1619 assert orev == rev
1618 1620 ts.append(ot)
1619 1621 results.append((rev, ts))
1620 1622 resultcount = len(results)
1621 1623
1622 1624 ### Compute and display relevant statistics
1623 1625
1624 1626 # get a formatter
1625 1627 fm = ui.formatter(b'perf', opts)
1626 1628 displayall = ui.configbool(b"perf", b"all-timing", False)
1627 1629
1628 1630 # print individual details if requested
1629 1631 if opts['details']:
1630 1632 for idx, item in enumerate(results, 1):
1631 1633 rev, data = item
1632 1634 title = 'revision #%d of %d, rev %d' % (idx, resultcount, rev)
1633 1635 formatone(fm, data, title=title, displayall=displayall)
1634 1636
1635 1637 # sort results by median time
1636 1638 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1637 1639 # list of (name, index) pairs to display
1638 1640 relevants = [
1639 1641 ("min", 0),
1640 1642 ("10%", resultcount * 10 // 100),
1641 1643 ("25%", resultcount * 25 // 100),
1642 1644 ("50%", resultcount * 70 // 100),
1643 1645 ("75%", resultcount * 75 // 100),
1644 1646 ("90%", resultcount * 90 // 100),
1645 1647 ("95%", resultcount * 95 // 100),
1646 1648 ("99%", resultcount * 99 // 100),
1647 1649 ("max", -1),
1648 1650 ]
1649 1651 if not ui.quiet:
1650 1652 for name, idx in relevants:
1651 1653 data = results[idx]
1652 1654 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1653 1655 formatone(fm, data[1], title=title, displayall=displayall)
1654 1656
1655 1657 # XXX summing that many floats will not be very precise; we ignore this
1656 1658 # fact for now
1657 1659 totaltime = []
1658 1660 for item in allresults:
1659 1661 totaltime.append((sum(x[1][0] for x in item),
1660 1662 sum(x[1][1] for x in item),
1661 1663 sum(x[1][2] for x in item),)
1662 1664 )
1663 1665 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1664 1666 displayall=displayall)
1665 1667 fm.end()
1666 1668
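# Note: addrawrevision() only appears to need a transaction-like object on
# which it can call add(), so the no-op stand-in below seems sufficient for
# benchmarking purposes.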
1667 1669 class _faketr(object):
1668 1670 def add(s, x, y, z=None):
1669 1671 return None
1670 1672
1671 1673 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None):
1672 1674 timings = []
1673 1675 tr = _faketr()
1674 1676 with _temprevlog(ui, orig, startrev) as dest:
1675 1677 revs = list(orig.revs(startrev, stoprev))
1676 1678 total = len(revs)
1677 1679 topic = 'adding'
1678 1680 if runidx is not None:
1679 1681 topic += ' (run #%d)' % runidx
1680 1682 for idx, rev in enumerate(revs):
1681 1683 ui.progress(topic, idx, unit='revs', total=total)
1682 1684 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
1683 1685 with timeone() as r:
1684 1686 dest.addrawrevision(*addargs, **addkwargs)
1685 1687 timings.append((rev, r[0]))
1686 1688 ui.progress(topic, total, unit='revs', total=total)
1687 1689 ui.progress(topic, None, unit='revs', total=total)
1688 1690 return timings
1689 1691
1690 1692 def _getrevisionseed(orig, rev, tr, source):
1691 1693 from mercurial.node import nullid
1692 1694
1693 1695 linkrev = orig.linkrev(rev)
1694 1696 node = orig.node(rev)
1695 1697 p1, p2 = orig.parents(node)
1696 1698 flags = orig.flags(rev)
1697 1699 cachedelta = None
1698 1700 text = None
1699 1701
1700 1702 if source == b'full':
1701 1703 text = orig.revision(rev)
1702 1704 elif source == b'parent-1':
1703 1705 baserev = orig.rev(p1)
1704 1706 cachedelta = (baserev, orig.revdiff(p1, rev))
1705 1707 elif source == b'parent-2':
1706 1708 parent = p2
1707 1709 if p2 == nullid:
1708 1710 parent = p1
1709 1711 baserev = orig.rev(parent)
1710 1712 cachedelta = (baserev, orig.revdiff(parent, rev))
1711 1713 elif source == b'parent-smallest':
1712 1714 p1diff = orig.revdiff(p1, rev)
1713 1715 parent = p1
1714 1716 diff = p1diff
1715 1717 if p2 != nullid:
1716 1718 p2diff = orig.revdiff(p2, rev)
1717 1719 if len(p1diff) > len(p2diff):
1718 1720 parent = p2
1719 1721 diff = p2diff
1720 1722 baserev = orig.rev(parent)
1721 1723 cachedelta = (baserev, diff)
1724 elif source == b'storage':
1725 baserev = orig.deltaparent(rev)
1726 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
1722 1727
1723 1728 return ((text, tr, linkrev, p1, p2),
1724 1729 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1725 1730
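# Illustrative sketch (hypothetical helper, not wired to any command): how a
# single revision could be re-added with the new 'storage' source, i.e. by
# reusing the delta already stored in the source revlog, mirroring the branch
# added to _getrevisionseed() above.
def _addfromstorage(orig, dest, tr, rev):
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    baserev = orig.deltaparent(rev)                      # stored delta base
    cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
    dest.addrawrevision(None, tr, orig.linkrev(rev), p1, p2,
                        node=node, flags=orig.flags(rev),
                        cachedelta=cachedelta)
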
1726 1731 @contextlib.contextmanager
1727 1732 def _temprevlog(ui, orig, truncaterev):
1728 1733 from mercurial import vfs as vfsmod
1729 1734
1730 1735 if orig._inline:
1731 1736 raise error.Abort('not supporting inline revlog (yet)')
1732 1737
1733 1738 origindexpath = orig.opener.join(orig.indexfile)
1734 1739 origdatapath = orig.opener.join(orig.datafile)
1735 1740 indexname = 'revlog.i'
1736 1741 dataname = 'revlog.d'
1737 1742
1738 1743 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
1739 1744 try:
1740 1745 # copy the data file in a temporary directory
1741 1746 ui.debug('copying data in %s\n' % tmpdir)
1742 1747 destindexpath = os.path.join(tmpdir, 'revlog.i')
1743 1748 destdatapath = os.path.join(tmpdir, 'revlog.d')
1744 1749 shutil.copyfile(origindexpath, destindexpath)
1745 1750 shutil.copyfile(origdatapath, destdatapath)
1746 1751
1747 1752 # remove the data we want to add again
1748 1753 ui.debug('truncating data to be rewritten\n')
1749 1754 with open(destindexpath, 'ab') as index:
1750 1755 index.seek(0)
1751 1756 index.truncate(truncaterev * orig._io.size)
1752 1757 with open(destdatapath, 'ab') as data:
1753 1758 data.seek(0)
1754 1759 data.truncate(orig.start(truncaterev))
1755 1760
1756 1761 # instantiate a new revlog from the temporary copy
1757 1762 ui.debug('instantiating revlog from the truncated copy\n')
1758 1763 vfs = vfsmod.vfs(tmpdir)
1759 1764 vfs.options = getattr(orig.opener, 'options', None)
1760 1765
1761 1766 dest = revlog.revlog(vfs,
1762 1767 indexfile=indexname,
1763 1768 datafile=dataname)
1764 1769 if dest._inline:
1765 1770 raise error.Abort('not supporting inline revlog (yet)')
1766 1771 # make sure internals are initialized
1767 1772 dest.revision(len(dest) - 1)
1768 1773 yield dest
1769 1774 del dest, vfs
1770 1775 finally:
1771 1776 shutil.rmtree(tmpdir, True)
1772 1777
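# Illustrative note on the truncation above: for a non-inline revlog every
# index entry has a fixed size (orig._io.size), so cutting the copied index
# at `truncaterev * orig._io.size` and the data file at
# `orig.start(truncaterev)` keeps exactly the first `truncaterev` revisions,
# leaving the rest to be re-added by the benchmark.
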
1773 1778 @command(b'perfrevlogchunks', revlogopts + formatteropts +
1774 1779 [(b'e', b'engines', b'', b'compression engines to use'),
1775 1780 (b's', b'startrev', 0, b'revision to start at')],
1776 1781 b'-c|-m|FILE')
1777 1782 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
1778 1783 """Benchmark operations on revlog chunks.
1779 1784
1780 1785 Logically, each revlog is a collection of fulltext revisions. However,
1781 1786 stored within each revlog are "chunks" of possibly compressed data. This
1782 1787 data needs to be read and decompressed or compressed and written.
1783 1788
1784 1789 This command measures the time it takes to read+decompress and recompress
1785 1790 chunks in a revlog. It effectively isolates I/O and compression performance.
1786 1791 For measurements of higher-level operations like resolving revisions,
1787 1792 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
1788 1793 """
1789 1794 opts = _byteskwargs(opts)
1790 1795
1791 1796 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
1792 1797
1793 1798 # _chunkraw was renamed to _getsegmentforrevs.
1794 1799 try:
1795 1800 segmentforrevs = rl._getsegmentforrevs
1796 1801 except AttributeError:
1797 1802 segmentforrevs = rl._chunkraw
1798 1803
1799 1804 # Verify engines argument.
1800 1805 if engines:
1801 1806 engines = set(e.strip() for e in engines.split(b','))
1802 1807 for engine in engines:
1803 1808 try:
1804 1809 util.compressionengines[engine]
1805 1810 except KeyError:
1806 1811 raise error.Abort(b'unknown compression engine: %s' % engine)
1807 1812 else:
1808 1813 engines = []
1809 1814 for e in util.compengines:
1810 1815 engine = util.compengines[e]
1811 1816 try:
1812 1817 if engine.available():
1813 1818 engine.revlogcompressor().compress(b'dummy')
1814 1819 engines.append(e)
1815 1820 except NotImplementedError:
1816 1821 pass
1817 1822
1818 1823 revs = list(rl.revs(startrev, len(rl) - 1))
1819 1824
1820 1825 def rlfh(rl):
1821 1826 if rl._inline:
1822 1827 return getsvfs(repo)(rl.indexfile)
1823 1828 else:
1824 1829 return getsvfs(repo)(rl.datafile)
1825 1830
1826 1831 def doread():
1827 1832 rl.clearcaches()
1828 1833 for rev in revs:
1829 1834 segmentforrevs(rev, rev)
1830 1835
1831 1836 def doreadcachedfh():
1832 1837 rl.clearcaches()
1833 1838 fh = rlfh(rl)
1834 1839 for rev in revs:
1835 1840 segmentforrevs(rev, rev, df=fh)
1836 1841
1837 1842 def doreadbatch():
1838 1843 rl.clearcaches()
1839 1844 segmentforrevs(revs[0], revs[-1])
1840 1845
1841 1846 def doreadbatchcachedfh():
1842 1847 rl.clearcaches()
1843 1848 fh = rlfh(rl)
1844 1849 segmentforrevs(revs[0], revs[-1], df=fh)
1845 1850
1846 1851 def dochunk():
1847 1852 rl.clearcaches()
1848 1853 fh = rlfh(rl)
1849 1854 for rev in revs:
1850 1855 rl._chunk(rev, df=fh)
1851 1856
1852 1857 chunks = [None]
1853 1858
1854 1859 def dochunkbatch():
1855 1860 rl.clearcaches()
1856 1861 fh = rlfh(rl)
1857 1862 # Save chunks as a side-effect.
1858 1863 chunks[0] = rl._chunks(revs, df=fh)
1859 1864
1860 1865 def docompress(compressor):
1861 1866 rl.clearcaches()
1862 1867
1863 1868 try:
1864 1869 # Swap in the requested compression engine.
1865 1870 oldcompressor = rl._compressor
1866 1871 rl._compressor = compressor
1867 1872 for chunk in chunks[0]:
1868 1873 rl.compress(chunk)
1869 1874 finally:
1870 1875 rl._compressor = oldcompressor
1871 1876
1872 1877 benches = [
1873 1878 (lambda: doread(), b'read'),
1874 1879 (lambda: doreadcachedfh(), b'read w/ reused fd'),
1875 1880 (lambda: doreadbatch(), b'read batch'),
1876 1881 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
1877 1882 (lambda: dochunk(), b'chunk'),
1878 1883 (lambda: dochunkbatch(), b'chunk batch'),
1879 1884 ]
1880 1885
1881 1886 for engine in sorted(engines):
1882 1887 compressor = util.compengines[engine].revlogcompressor()
1883 1888 benches.append((functools.partial(docompress, compressor),
1884 1889 b'compress w/ %s' % engine))
1885 1890
1886 1891 for fn, title in benches:
1887 1892 timer, fm = gettimer(ui, opts)
1888 1893 timer(fn, title=title)
1889 1894 fm.end()
1890 1895
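# Illustrative sketch (hypothetical helper, not wired to any command):
# compressing a raw chunk with an explicitly chosen engine, using the same
# compressor-swapping pattern as docompress() above.
def _compresswith(rl, chunk, engine=b'zlib'):
    compressor = util.compengines[engine].revlogcompressor()
    oldcompressor = rl._compressor
    rl._compressor = compressor
    try:
        return rl.compress(chunk)
    finally:
        rl._compressor = oldcompressor
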
1891 1896 @command(b'perfrevlogrevision', revlogopts + formatteropts +
1892 1897 [(b'', b'cache', False, b'use caches instead of clearing')],
1893 1898 b'-c|-m|FILE REV')
1894 1899 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
1895 1900 """Benchmark obtaining a revlog revision.
1896 1901
1897 1902 Obtaining a revlog revision consists of roughly the following steps:
1898 1903
1899 1904 1. Compute the delta chain
1900 1905 2. Slice the delta chain if applicable
1901 1906 3. Obtain the raw chunks for that delta chain
1902 1907 4. Decompress each raw chunk
1903 1908 5. Apply binary patches to obtain fulltext
1904 1909 6. Verify hash of fulltext
1905 1910
1906 1911 This command measures the time spent in each of these phases.
1907 1912 """
1908 1913 opts = _byteskwargs(opts)
1909 1914
1910 1915 if opts.get(b'changelog') or opts.get(b'manifest'):
1911 1916 file_, rev = None, file_
1912 1917 elif rev is None:
1913 1918 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
1914 1919
1915 1920 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
1916 1921
1917 1922 # _chunkraw was renamed to _getsegmentforrevs.
1918 1923 try:
1919 1924 segmentforrevs = r._getsegmentforrevs
1920 1925 except AttributeError:
1921 1926 segmentforrevs = r._chunkraw
1922 1927
1923 1928 node = r.lookup(rev)
1924 1929 rev = r.rev(node)
1925 1930
1926 1931 def getrawchunks(data, chain):
1927 1932 start = r.start
1928 1933 length = r.length
1929 1934 inline = r._inline
1930 1935 iosize = r._io.size
1931 1936 buffer = util.buffer
1932 1937
1933 1938 chunks = []
1934 1939 ladd = chunks.append
1935 1940 for idx, item in enumerate(chain):
1936 1941 offset = start(item[0])
1937 1942 bits = data[idx]
1938 1943 for rev in item:
1939 1944 chunkstart = start(rev)
1940 1945 if inline:
1941 1946 chunkstart += (rev + 1) * iosize
1942 1947 chunklength = length(rev)
1943 1948 ladd(buffer(bits, chunkstart - offset, chunklength))
1944 1949
1945 1950 return chunks
1946 1951
1947 1952 def dodeltachain(rev):
1948 1953 if not cache:
1949 1954 r.clearcaches()
1950 1955 r._deltachain(rev)
1951 1956
1952 1957 def doread(chain):
1953 1958 if not cache:
1954 1959 r.clearcaches()
1955 1960 for item in slicedchain:
1956 1961 segmentforrevs(item[0], item[-1])
1957 1962
1958 1963 def doslice(r, chain, size):
1959 1964 for s in slicechunk(r, chain, targetsize=size):
1960 1965 pass
1961 1966
1962 1967 def dorawchunks(data, chain):
1963 1968 if not cache:
1964 1969 r.clearcaches()
1965 1970 getrawchunks(data, chain)
1966 1971
1967 1972 def dodecompress(chunks):
1968 1973 decomp = r.decompress
1969 1974 for chunk in chunks:
1970 1975 decomp(chunk)
1971 1976
1972 1977 def dopatch(text, bins):
1973 1978 if not cache:
1974 1979 r.clearcaches()
1975 1980 mdiff.patches(text, bins)
1976 1981
1977 1982 def dohash(text):
1978 1983 if not cache:
1979 1984 r.clearcaches()
1980 1985 r.checkhash(text, node, rev=rev)
1981 1986
1982 1987 def dorevision():
1983 1988 if not cache:
1984 1989 r.clearcaches()
1985 1990 r.revision(node)
1986 1991
1987 1992 try:
1988 1993 from mercurial.revlogutils.deltas import slicechunk
1989 1994 except ImportError:
1990 1995 slicechunk = getattr(revlog, '_slicechunk', None)
1991 1996
1992 1997 size = r.length(rev)
1993 1998 chain = r._deltachain(rev)[0]
1994 1999 if not getattr(r, '_withsparseread', False):
1995 2000 slicedchain = (chain,)
1996 2001 else:
1997 2002 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
1998 2003 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
1999 2004 rawchunks = getrawchunks(data, slicedchain)
2000 2005 bins = r._chunks(chain)
2001 2006 text = bytes(bins[0])
2002 2007 bins = bins[1:]
2003 2008 text = mdiff.patches(text, bins)
2004 2009
2005 2010 benches = [
2006 2011 (lambda: dorevision(), b'full'),
2007 2012 (lambda: dodeltachain(rev), b'deltachain'),
2008 2013 (lambda: doread(chain), b'read'),
2009 2014 ]
2010 2015
2011 2016 if getattr(r, '_withsparseread', False):
2012 2017 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2013 2018 benches.append(slicing)
2014 2019
2015 2020 benches.extend([
2016 2021 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2017 2022 (lambda: dodecompress(rawchunks), b'decompress'),
2018 2023 (lambda: dopatch(text, bins), b'patch'),
2019 2024 (lambda: dohash(text), b'hash'),
2020 2025 ])
2021 2026
2022 2027 timer, fm = gettimer(ui, opts)
2023 2028 for fn, title in benches:
2024 2029 timer(fn, title=title)
2025 2030 fm.end()
2026 2031
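# Illustrative sketch (hypothetical helper, not used by any command): the
# phases timed by perfrevlogrevision, chained together by hand with the same
# calls the benchmark uses; slicing (step 2) is skipped here for brevity.
def _rebuildfulltext(r, rev):
    node = r.node(rev)
    chain = r._deltachain(rev)[0]                   # 1. compute the delta chain
    bins = r._chunks(chain)                         # 3-4. read and decompress chunks
    text = mdiff.patches(bytes(bins[0]), bins[1:])  # 5. apply the binary patches
    r.checkhash(text, node, rev=rev)                # 6. verify the fulltext hash
    return text
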
2027 2032 @command(b'perfrevset',
2028 2033 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2029 2034 (b'', b'contexts', False, b'obtain changectx for each revision')]
2030 2035 + formatteropts, b"REVSET")
2031 2036 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2032 2037 """benchmark the execution time of a revset
2033 2038
2034 2039 Use the --clear option if you need to evaluate the impact of building the
2035 2040 volatile revision set caches on revset execution. The volatile caches hold
2036 2041 filtering- and obsolescence-related data."""
2037 2042 opts = _byteskwargs(opts)
2038 2043
2039 2044 timer, fm = gettimer(ui, opts)
2040 2045 def d():
2041 2046 if clear:
2042 2047 repo.invalidatevolatilesets()
2043 2048 if contexts:
2044 2049 for ctx in repo.set(expr): pass
2045 2050 else:
2046 2051 for r in repo.revs(expr): pass
2047 2052 timer(d)
2048 2053 fm.end()
2049 2054
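# Illustrative note: repo.revs() iterates plain revision numbers while
# repo.set() builds a changectx per revision, which is what the --contexts
# flag above toggles between, e.g.:
#   for r in repo.revs(b'all()'): pass       # integers only
#   for ctx in repo.set(b'all()'): pass      # full changectx objects
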
2050 2055 @command(b'perfvolatilesets',
2051 2056 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2052 2057 ] + formatteropts)
2053 2058 def perfvolatilesets(ui, repo, *names, **opts):
2054 2059 """benchmark the computation of various volatile set
2055 2060
2056 2061 Volatile set computes element related to filtering and obsolescence."""
2057 2062 opts = _byteskwargs(opts)
2058 2063 timer, fm = gettimer(ui, opts)
2059 2064 repo = repo.unfiltered()
2060 2065
2061 2066 def getobs(name):
2062 2067 def d():
2063 2068 repo.invalidatevolatilesets()
2064 2069 if opts[b'clear_obsstore']:
2065 2070 clearfilecache(repo, b'obsstore')
2066 2071 obsolete.getrevs(repo, name)
2067 2072 return d
2068 2073
2069 2074 allobs = sorted(obsolete.cachefuncs)
2070 2075 if names:
2071 2076 allobs = [n for n in allobs if n in names]
2072 2077
2073 2078 for name in allobs:
2074 2079 timer(getobs(name), title=name)
2075 2080
2076 2081 def getfiltered(name):
2077 2082 def d():
2078 2083 repo.invalidatevolatilesets()
2079 2084 if opts[b'clear_obsstore']:
2080 2085 clearfilecache(repo, b'obsstore')
2081 2086 repoview.filterrevs(repo, name)
2082 2087 return d
2083 2088
2084 2089 allfilter = sorted(repoview.filtertable)
2085 2090 if names:
2086 2091 allfilter = [n for n in allfilter if n in names]
2087 2092
2088 2093 for name in allfilter:
2089 2094 timer(getfiltered(name), title=name)
2090 2095 fm.end()
2091 2096
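# Illustrative sketch (hypothetical helper, not wired to any command): timing
# a single volatile set by hand, mirroring the getobs() closure above; `name`
# is assumed to be a key of obsolete.cachefuncs.
def _timeonevolatileset(ui, repo, name, opts):
    timer, fm = gettimer(ui, opts)
    def d():
        repo.invalidatevolatilesets()
        obsolete.getrevs(repo, name)
    timer(d, title=name)
    fm.end()
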
2092 2097 @command(b'perfbranchmap',
2093 2098 [(b'f', b'full', False,
2094 2099 b'Includes build time of subset'),
2095 2100 (b'', b'clear-revbranch', False,
2096 2101 b'purge the revbranch cache between computation'),
2097 2102 ] + formatteropts)
2098 2103 def perfbranchmap(ui, repo, *filternames, **opts):
2099 2104 """benchmark the update of a branchmap
2100 2105
2101 2106 This benchmarks the full repo.branchmap() call with read and write disabled
2102 2107 """
2103 2108 opts = _byteskwargs(opts)
2104 2109 full = opts.get(b"full", False)
2105 2110 clear_revbranch = opts.get(b"clear_revbranch", False)
2106 2111 timer, fm = gettimer(ui, opts)
2107 2112 def getbranchmap(filtername):
2108 2113 """generate a benchmark function for the filtername"""
2109 2114 if filtername is None:
2110 2115 view = repo
2111 2116 else:
2112 2117 view = repo.filtered(filtername)
2113 2118 def d():
2114 2119 if clear_revbranch:
2115 2120 repo.revbranchcache()._clear()
2116 2121 if full:
2117 2122 view._branchcaches.clear()
2118 2123 else:
2119 2124 view._branchcaches.pop(filtername, None)
2120 2125 view.branchmap()
2121 2126 return d
2122 2127 # add filters from the smaller subsets to the bigger ones
2123 2128 possiblefilters = set(repoview.filtertable)
2124 2129 if filternames:
2125 2130 possiblefilters &= set(filternames)
2126 2131 subsettable = getbranchmapsubsettable()
2127 2132 allfilters = []
2128 2133 while possiblefilters:
2129 2134 for name in possiblefilters:
2130 2135 subset = subsettable.get(name)
2131 2136 if subset not in possiblefilters:
2132 2137 break
2133 2138 else:
2134 2139 assert False, b'subset cycle %s!' % possiblefilters
2135 2140 allfilters.append(name)
2136 2141 possiblefilters.remove(name)
2137 2142
2138 2143 # warm the cache
2139 2144 if not full:
2140 2145 for name in allfilters:
2141 2146 repo.filtered(name).branchmap()
2142 2147 if not filternames or b'unfiltered' in filternames:
2143 2148 # add unfiltered
2144 2149 allfilters.append(None)
2145 2150
2146 2151 branchcacheread = safeattrsetter(branchmap, b'read')
2147 2152 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2148 2153 branchcacheread.set(lambda repo: None)
2149 2154 branchcachewrite.set(lambda bc, repo: None)
2150 2155 try:
2151 2156 for name in allfilters:
2152 2157 printname = name
2153 2158 if name is None:
2154 2159 printname = b'unfiltered'
2155 2160 timer(getbranchmap(name), title=str(printname))
2156 2161 finally:
2157 2162 branchcacheread.restore()
2158 2163 branchcachewrite.restore()
2159 2164 fm.end()
2160 2165
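# Illustrative note: the benchmark above swaps branchmap.read and
# branchcache.write for no-ops through safeattrsetter, so only the in-memory
# branch map update is timed, not cache I/O. The pattern looks like:
#
#     stub = safeattrsetter(branchmap, b'read')
#     stub.set(lambda repo: None)    # disable reading the on-disk cache
#     try:
#         ...                        # timed work
#     finally:
#         stub.restore()             # put the original attribute back
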
2161 2166 @command(b'perfbranchmapload', [
2162 2167 (b'f', b'filter', b'', b'Specify repoview filter'),
2163 2168 (b'', b'list', False, b'List branchmap filter caches'),
2164 2169 ] + formatteropts)
2165 2170 def perfbranchmapread(ui, repo, filter=b'', list=False, **opts):
2166 2171 """benchmark reading the branchmap"""
2167 2172 opts = _byteskwargs(opts)
2168 2173
2169 2174 if list:
2170 2175 for name, kind, st in repo.cachevfs.readdir(stat=True):
2171 2176 if name.startswith(b'branch2'):
2172 2177 filtername = name.partition(b'-')[2] or b'unfiltered'
2173 2178 ui.status(b'%s - %s\n'
2174 2179 % (filtername, util.bytecount(st.st_size)))
2175 2180 return
2176 2181 if filter:
2177 2182 repo = repoview.repoview(repo, filter)
2178 2183 else:
2179 2184 repo = repo.unfiltered()
2180 2185 # try once without the timer; the filter may not be cached
2181 2186 if branchmap.read(repo) is None:
2182 2187 raise error.Abort(b'No branchmap cached for %s repo'
2183 2188 % (filter or b'unfiltered'))
2184 2189 timer, fm = gettimer(ui, opts)
2185 2190 timer(lambda: branchmap.read(repo) and None)
2186 2191 fm.end()
2187 2192
2188 2193 @command(b'perfloadmarkers')
2189 2194 def perfloadmarkers(ui, repo):
2190 2195 """benchmark the time to parse the on-disk markers for a repo
2191 2196
2192 2197 Result is the number of markers in the repo."""
2193 2198 timer, fm = gettimer(ui)
2194 2199 svfs = getsvfs(repo)
2195 2200 timer(lambda: len(obsolete.obsstore(svfs)))
2196 2201 fm.end()
2197 2202
2198 2203 @command(b'perflrucachedict', formatteropts +
2199 2204 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2200 2205 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2201 2206 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2202 2207 (b'', b'size', 4, b'size of cache'),
2203 2208 (b'', b'gets', 10000, b'number of key lookups'),
2204 2209 (b'', b'sets', 10000, b'number of key sets'),
2205 2210 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2206 2211 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2207 2212 norepo=True)
2208 2213 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2209 2214 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2210 2215 opts = _byteskwargs(opts)
2211 2216
2212 2217 def doinit():
2213 2218 for i in _xrange(10000):
2214 2219 util.lrucachedict(size)
2215 2220
2216 2221 costrange = list(range(mincost, maxcost + 1))
2217 2222
2218 2223 values = []
2219 2224 for i in _xrange(size):
2220 2225 values.append(random.randint(0, _maxint))
2221 2226
2222 2227 # Get mode fills the cache and tests raw lookup performance with no
2223 2228 # eviction.
2224 2229 getseq = []
2225 2230 for i in _xrange(gets):
2226 2231 getseq.append(random.choice(values))
2227 2232
2228 2233 def dogets():
2229 2234 d = util.lrucachedict(size)
2230 2235 for v in values:
2231 2236 d[v] = v
2232 2237 for key in getseq:
2233 2238 value = d[key]
2234 2239 value # silence pyflakes warning
2235 2240
2236 2241 def dogetscost():
2237 2242 d = util.lrucachedict(size, maxcost=costlimit)
2238 2243 for i, v in enumerate(values):
2239 2244 d.insert(v, v, cost=costs[i])
2240 2245 for key in getseq:
2241 2246 try:
2242 2247 value = d[key]
2243 2248 value # silence pyflakes warning
2244 2249 except KeyError:
2245 2250 pass
2246 2251
2247 2252 # Set mode tests insertion speed with cache eviction.
2248 2253 setseq = []
2249 2254 costs = []
2250 2255 for i in _xrange(sets):
2251 2256 setseq.append(random.randint(0, _maxint))
2252 2257 costs.append(random.choice(costrange))
2253 2258
2254 2259 def doinserts():
2255 2260 d = util.lrucachedict(size)
2256 2261 for v in setseq:
2257 2262 d.insert(v, v)
2258 2263
2259 2264 def doinsertscost():
2260 2265 d = util.lrucachedict(size, maxcost=costlimit)
2261 2266 for i, v in enumerate(setseq):
2262 2267 d.insert(v, v, cost=costs[i])
2263 2268
2264 2269 def dosets():
2265 2270 d = util.lrucachedict(size)
2266 2271 for v in setseq:
2267 2272 d[v] = v
2268 2273
2269 2274 # Mixed mode randomly performs gets and sets with eviction.
2270 2275 mixedops = []
2271 2276 for i in _xrange(mixed):
2272 2277 r = random.randint(0, 100)
2273 2278 if r < mixedgetfreq:
2274 2279 op = 0
2275 2280 else:
2276 2281 op = 1
2277 2282
2278 2283 mixedops.append((op,
2279 2284 random.randint(0, size * 2),
2280 2285 random.choice(costrange)))
2281 2286
2282 2287 def domixed():
2283 2288 d = util.lrucachedict(size)
2284 2289
2285 2290 for op, v, cost in mixedops:
2286 2291 if op == 0:
2287 2292 try:
2288 2293 d[v]
2289 2294 except KeyError:
2290 2295 pass
2291 2296 else:
2292 2297 d[v] = v
2293 2298
2294 2299 def domixedcost():
2295 2300 d = util.lrucachedict(size, maxcost=costlimit)
2296 2301
2297 2302 for op, v, cost in mixedops:
2298 2303 if op == 0:
2299 2304 try:
2300 2305 d[v]
2301 2306 except KeyError:
2302 2307 pass
2303 2308 else:
2304 2309 d.insert(v, v, cost=cost)
2305 2310
2306 2311 benches = [
2307 2312 (doinit, b'init'),
2308 2313 ]
2309 2314
2310 2315 if costlimit:
2311 2316 benches.extend([
2312 2317 (dogetscost, b'gets w/ cost limit'),
2313 2318 (doinsertscost, b'inserts w/ cost limit'),
2314 2319 (domixedcost, b'mixed w/ cost limit'),
2315 2320 ])
2316 2321 else:
2317 2322 benches.extend([
2318 2323 (dogets, b'gets'),
2319 2324 (doinserts, b'inserts'),
2320 2325 (dosets, b'sets'),
2321 2326 (domixed, b'mixed')
2322 2327 ])
2323 2328
2324 2329 for fn, title in benches:
2325 2330 timer, fm = gettimer(ui, opts)
2326 2331 timer(fn, title=title)
2327 2332 fm.end()
2328 2333
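# Illustrative sketch (hypothetical values): the two lrucachedict flavours
# exercised by the benchmarks above.
def _lrucacheexample():
    d = util.lrucachedict(4)               # plain LRU, bounded by entry count
    d[b'a'] = 1
    value = d[b'a']

    c = util.lrucachedict(4, maxcost=100)  # cost-aware LRU
    c.insert(b'big', object(), cost=75)    # entries are evicted to keep the
                                           # total cost under maxcost
    return value, c
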
2329 2334 @command(b'perfwrite', formatteropts)
2330 2335 def perfwrite(ui, repo, **opts):
2331 2336 """microbenchmark ui.write
2332 2337 """
2333 2338 opts = _byteskwargs(opts)
2334 2339
2335 2340 timer, fm = gettimer(ui, opts)
2336 2341 def write():
2337 2342 for i in range(100000):
2338 2343 ui.write((b'Testing write performance\n'))
2339 2344 timer(write)
2340 2345 fm.end()
2341 2346
2342 2347 def uisetup(ui):
2343 2348 if (util.safehasattr(cmdutil, b'openrevlog') and
2344 2349 not util.safehasattr(commands, b'debugrevlogopts')):
2345 2350 # for "historical portability":
2346 2351 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2347 2352 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2348 2353 # openrevlog() should cause failure, because it has only been
2349 2354 # available since 3.5 (or 49c583ca48c4).
2350 2355 def openrevlog(orig, repo, cmd, file_, opts):
2351 2356 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2352 2357 raise error.Abort(b"This version doesn't support --dir option",
2353 2358 hint=b"use 3.5 or later")
2354 2359 return orig(repo, cmd, file_, opts)
2355 2360 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)