perf: add `parent-2` as possible source for perfrevlogwrite...
Boris Feld
r40588:b5b3dd4e default
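This changeset extends the `--source` option of `perfrevlogwrite` (defined later in this file) with a new `parent-2` value. An illustrative invocation, using only flags already declared in the command's option table, would be:

    hg perfrevlogwrite -m --source parent-2

which rebuilds the manifest revlog (three passes by default), adding each revision from a delta against its second parent when one exists, and against its first parent otherwise.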
@@ -1,2333 +1,2343 @@
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance'''
3 3
4 4 # "historical portability" policy of perf.py:
5 5 #
6 6 # We have to do:
7 7 # - make perf.py "loadable" with as wide Mercurial version as possible
8 8 # This doesn't mean that perf commands work correctly with that Mercurial.
9 9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
10 10 # - make historical perf command work correctly with as wide Mercurial
11 11 # version as possible
12 12 #
13 13 # We have to do, if possible with reasonable cost:
14 14 # - make recent perf command for historical feature work correctly
15 15 # with early Mercurial
16 16 #
17 17 # We don't have to do:
18 18 # - make perf command for recent feature work correctly with early
19 19 # Mercurial
20 20
21 21 from __future__ import absolute_import
22 22 import contextlib
23 23 import functools
24 24 import gc
25 25 import os
26 26 import random
27 27 import shutil
28 28 import struct
29 29 import sys
30 30 import tempfile
31 31 import threading
32 32 import time
33 33 from mercurial import (
34 34 changegroup,
35 35 cmdutil,
36 36 commands,
37 37 copies,
38 38 error,
39 39 extensions,
40 40 mdiff,
41 41 merge,
42 42 revlog,
43 43 util,
44 44 )
45 45
46 46 # for "historical portability":
47 47 # try to import modules separately (in dict order), and ignore
48 48 # failure, because these aren't available with early Mercurial
49 49 try:
50 50 from mercurial import branchmap # since 2.5 (or bcee63733aad)
51 51 except ImportError:
52 52 pass
53 53 try:
54 54 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
55 55 except ImportError:
56 56 pass
57 57 try:
58 58 from mercurial import registrar # since 3.7 (or 37d50250b696)
59 59 dir(registrar) # forcibly load it
60 60 except ImportError:
61 61 registrar = None
62 62 try:
63 63 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
64 64 except ImportError:
65 65 pass
66 66 try:
67 67 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
68 68 except ImportError:
69 69 pass
70 70
71 71 def identity(a):
72 72 return a
73 73
74 74 try:
75 75 from mercurial import pycompat
76 76 getargspec = pycompat.getargspec # added to module after 4.5
77 77 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
78 78 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
79 79 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
80 80 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
81 81 if pycompat.ispy3:
82 82 _maxint = sys.maxsize # per py3 docs for replacing maxint
83 83 else:
84 84 _maxint = sys.maxint
85 85 except (ImportError, AttributeError):
86 86 import inspect
87 87 getargspec = inspect.getargspec
88 88 _byteskwargs = identity
89 89 fsencode = identity # no py3 support
90 90 _maxint = sys.maxint # no py3 support
91 91 _sysstr = lambda x: x # no py3 support
92 92 _xrange = xrange
93 93
94 94 try:
95 95 # 4.7+
96 96 queue = pycompat.queue.Queue
97 97 except (AttributeError, ImportError):
98 98 # <4.7.
99 99 try:
100 100 queue = pycompat.queue
101 101 except (AttributeError, ImportError):
102 102 queue = util.queue
103 103
104 104 try:
105 105 from mercurial import logcmdutil
106 106 makelogtemplater = logcmdutil.maketemplater
107 107 except (AttributeError, ImportError):
108 108 try:
109 109 makelogtemplater = cmdutil.makelogtemplater
110 110 except (AttributeError, ImportError):
111 111 makelogtemplater = None
112 112
113 113 # for "historical portability":
114 114 # define util.safehasattr forcibly, because util.safehasattr has been
115 115 # available since 1.9.3 (or 94b200a11cf7)
116 116 _undefined = object()
117 117 def safehasattr(thing, attr):
118 118 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
119 119 setattr(util, 'safehasattr', safehasattr)
120 120
121 121 # for "historical portability":
122 122 # define util.timer forcibly, because util.timer has been available
123 123 # since ae5d60bb70c9
124 124 if safehasattr(time, 'perf_counter'):
125 125 util.timer = time.perf_counter
126 126 elif os.name == r'nt':
127 127 util.timer = time.clock
128 128 else:
129 129 util.timer = time.time
130 130
131 131 # for "historical portability":
132 132 # use locally defined empty option list, if formatteropts isn't
133 133 # available, because commands.formatteropts has been available since
134 134 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
135 135 # available since 2.2 (or ae5f92e154d3)
136 136 formatteropts = getattr(cmdutil, "formatteropts",
137 137 getattr(commands, "formatteropts", []))
138 138
139 139 # for "historical portability":
140 140 # use locally defined option list, if debugrevlogopts isn't available,
141 141 # because commands.debugrevlogopts has been available since 3.7 (or
142 142 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
143 143 # since 1.9 (or a79fea6b3e77).
144 144 revlogopts = getattr(cmdutil, "debugrevlogopts",
145 145 getattr(commands, "debugrevlogopts", [
146 146 (b'c', b'changelog', False, (b'open changelog')),
147 147 (b'm', b'manifest', False, (b'open manifest')),
148 148 (b'', b'dir', False, (b'open directory manifest')),
149 149 ]))
150 150
151 151 cmdtable = {}
152 152
153 153 # for "historical portability":
154 154 # define parsealiases locally, because cmdutil.parsealiases has been
155 155 # available since 1.5 (or 6252852b4332)
156 156 def parsealiases(cmd):
157 157 return cmd.split(b"|")
158 158
159 159 if safehasattr(registrar, 'command'):
160 160 command = registrar.command(cmdtable)
161 161 elif safehasattr(cmdutil, 'command'):
162 162 command = cmdutil.command(cmdtable)
163 163 if b'norepo' not in getargspec(command).args:
164 164 # for "historical portability":
165 165 # wrap original cmdutil.command, because "norepo" option has
166 166 # been available since 3.1 (or 75a96326cecb)
167 167 _command = command
168 168 def command(name, options=(), synopsis=None, norepo=False):
169 169 if norepo:
170 170 commands.norepo += b' %s' % b' '.join(parsealiases(name))
171 171 return _command(name, list(options), synopsis)
172 172 else:
173 173 # for "historical portability":
174 174 # define "@command" annotation locally, because cmdutil.command
175 175 # has been available since 1.9 (or 2daa5179e73f)
176 176 def command(name, options=(), synopsis=None, norepo=False):
177 177 def decorator(func):
178 178 if synopsis:
179 179 cmdtable[name] = func, list(options), synopsis
180 180 else:
181 181 cmdtable[name] = func, list(options)
182 182 if norepo:
183 183 commands.norepo += b' %s' % b' '.join(parsealiases(name))
184 184 return func
185 185 return decorator
186 186
187 187 try:
188 188 import mercurial.registrar
189 189 import mercurial.configitems
190 190 configtable = {}
191 191 configitem = mercurial.registrar.configitem(configtable)
192 192 configitem(b'perf', b'presleep',
193 193 default=mercurial.configitems.dynamicdefault,
194 194 )
195 195 configitem(b'perf', b'stub',
196 196 default=mercurial.configitems.dynamicdefault,
197 197 )
198 198 configitem(b'perf', b'parentscount',
199 199 default=mercurial.configitems.dynamicdefault,
200 200 )
201 201 configitem(b'perf', b'all-timing',
202 202 default=mercurial.configitems.dynamicdefault,
203 203 )
204 204 except (ImportError, AttributeError):
205 205 pass
206 206
207 207 def getlen(ui):
208 208 if ui.configbool(b"perf", b"stub", False):
209 209 return lambda x: 1
210 210 return len
211 211
212 212 def gettimer(ui, opts=None):
213 213 """return a timer function and formatter: (timer, formatter)
214 214
215 215 This function exists to gather the creation of formatter in a single
216 216 place instead of duplicating it in all performance commands."""
217 217
218 218 # enforce an idle period before execution to counteract power management
219 219 # experimental config: perf.presleep
220 220 time.sleep(getint(ui, b"perf", b"presleep", 1))
221 221
222 222 if opts is None:
223 223 opts = {}
224 224 # redirect all to stderr unless buffer api is in use
225 225 if not ui._buffers:
226 226 ui = ui.copy()
227 227 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
228 228 if uifout:
229 229 # for "historical portability":
230 230 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
231 231 uifout.set(ui.ferr)
232 232
233 233 # get a formatter
234 234 uiformatter = getattr(ui, 'formatter', None)
235 235 if uiformatter:
236 236 fm = uiformatter(b'perf', opts)
237 237 else:
238 238 # for "historical portability":
239 239 # define formatter locally, because ui.formatter has been
240 240 # available since 2.2 (or ae5f92e154d3)
241 241 from mercurial import node
242 242 class defaultformatter(object):
243 243 """Minimized composition of baseformatter and plainformatter
244 244 """
245 245 def __init__(self, ui, topic, opts):
246 246 self._ui = ui
247 247 if ui.debugflag:
248 248 self.hexfunc = node.hex
249 249 else:
250 250 self.hexfunc = node.short
251 251 def __nonzero__(self):
252 252 return False
253 253 __bool__ = __nonzero__
254 254 def startitem(self):
255 255 pass
256 256 def data(self, **data):
257 257 pass
258 258 def write(self, fields, deftext, *fielddata, **opts):
259 259 self._ui.write(deftext % fielddata, **opts)
260 260 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
261 261 if cond:
262 262 self._ui.write(deftext % fielddata, **opts)
263 263 def plain(self, text, **opts):
264 264 self._ui.write(text, **opts)
265 265 def end(self):
266 266 pass
267 267 fm = defaultformatter(ui, b'perf', opts)
268 268
269 269 # stub function, runs code only once instead of in a loop
270 270 # experimental config: perf.stub
271 271 if ui.configbool(b"perf", b"stub", False):
272 272 return functools.partial(stub_timer, fm), fm
273 273
274 274 # experimental config: perf.all-timing
275 275 displayall = ui.configbool(b"perf", b"all-timing", False)
276 276 return functools.partial(_timer, fm, displayall=displayall), fm
277 277
278 278 def stub_timer(fm, func, title=None):
279 279 func()
280 280
281 281 @contextlib.contextmanager
282 282 def timeone():
283 283 r = []
284 284 ostart = os.times()
285 285 cstart = util.timer()
286 286 yield r
287 287 cstop = util.timer()
288 288 ostop = os.times()
289 289 a, b = ostart, ostop
290 290 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
291 291
292 292 def _timer(fm, func, title=None, displayall=False):
293 293 gc.collect()
294 294 results = []
295 295 begin = util.timer()
296 296 count = 0
297 297 while True:
298 298 with timeone() as item:
299 299 r = func()
300 300 count += 1
301 301 results.append(item[0])
302 302 cstop = util.timer()
303 303 if cstop - begin > 3 and count >= 100:
304 304 break
305 305 if cstop - begin > 10 and count >= 3:
306 306 break
307 307
308 308 formatone(fm, results, title=title, result=r,
309 309 displayall=displayall)
310 310
311 311 def formatone(fm, timings, title=None, result=None, displayall=False):
312 312
313 313 count = len(timings)
314 314
315 315 fm.startitem()
316 316
317 317 if title:
318 318 fm.write(b'title', b'! %s\n', title)
319 319 if result:
320 320 fm.write(b'result', b'! result: %s\n', result)
321 321 def display(role, entry):
322 322 prefix = b''
323 323 if role != b'best':
324 324 prefix = b'%s.' % role
325 325 fm.plain(b'!')
326 326 fm.write(prefix + b'wall', b' wall %f', entry[0])
327 327 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
328 328 fm.write(prefix + b'user', b' user %f', entry[1])
329 329 fm.write(prefix + b'sys', b' sys %f', entry[2])
330 330 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
331 331 fm.plain(b'\n')
332 332 timings.sort()
333 333 min_val = timings[0]
334 334 display(b'best', min_val)
335 335 if displayall:
336 336 max_val = timings[-1]
337 337 display(b'max', max_val)
338 338 avg = tuple([sum(x) / count for x in zip(*timings)])
339 339 display(b'avg', avg)
340 340 median = timings[len(timings) // 2]
341 341 display(b'median', median)
342 342
343 343 # utilities for historical portability
344 344
345 345 def getint(ui, section, name, default):
346 346 # for "historical portability":
347 347 # ui.configint has been available since 1.9 (or fa2b596db182)
348 348 v = ui.config(section, name, None)
349 349 if v is None:
350 350 return default
351 351 try:
352 352 return int(v)
353 353 except ValueError:
354 354 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
355 355 % (section, name, v))
356 356
357 357 def safeattrsetter(obj, name, ignoremissing=False):
358 358 """Ensure that 'obj' has 'name' attribute before subsequent setattr
359 359
360 360 This function aborts if 'obj' doesn't have the 'name' attribute
361 361 at runtime. This avoids silently overlooking a future removal of an
362 362 attribute that the performance measurement relies on.
363 363
364 364 This function returns an object that can (1) assign a new value to
365 365 the attribute, and (2) restore the attribute's original value.
366 366
367 367 If 'ignoremissing' is true, a missing 'name' attribute doesn't cause
368 368 an abort, and this function returns None. This is useful to examine
369 369 an attribute that isn't guaranteed to exist in all Mercurial
370 370 versions.
371 371 """
372 372 if not util.safehasattr(obj, name):
373 373 if ignoremissing:
374 374 return None
375 375 raise error.Abort((b"missing attribute %s of %s might break assumption"
376 376 b" of performance measurement") % (name, obj))
377 377
378 378 origvalue = getattr(obj, _sysstr(name))
379 379 class attrutil(object):
380 380 def set(self, newvalue):
381 381 setattr(obj, _sysstr(name), newvalue)
382 382 def restore(self):
383 383 setattr(obj, _sysstr(name), origvalue)
384 384
385 385 return attrutil()
386 386
387 387 # utilities to examine each internal API changes
388 388
389 389 def getbranchmapsubsettable():
390 390 # for "historical portability":
391 391 # subsettable is defined in:
392 392 # - branchmap since 2.9 (or 175c6fd8cacc)
393 393 # - repoview since 2.5 (or 59a9f18d4587)
394 394 for mod in (branchmap, repoview):
395 395 subsettable = getattr(mod, 'subsettable', None)
396 396 if subsettable:
397 397 return subsettable
398 398
399 399 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
400 400 # branchmap and repoview modules exist, but subsettable attribute
401 401 # doesn't)
402 402 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
403 403 hint=b"use 2.5 or later")
404 404
405 405 def getsvfs(repo):
406 406 """Return appropriate object to access files under .hg/store
407 407 """
408 408 # for "historical portability":
409 409 # repo.svfs has been available since 2.3 (or 7034365089bf)
410 410 svfs = getattr(repo, 'svfs', None)
411 411 if svfs:
412 412 return svfs
413 413 else:
414 414 return getattr(repo, 'sopener')
415 415
416 416 def getvfs(repo):
417 417 """Return appropriate object to access files under .hg
418 418 """
419 419 # for "historical portability":
420 420 # repo.vfs has been available since 2.3 (or 7034365089bf)
421 421 vfs = getattr(repo, 'vfs', None)
422 422 if vfs:
423 423 return vfs
424 424 else:
425 425 return getattr(repo, 'opener')
426 426
427 427 def repocleartagscachefunc(repo):
428 428 """Return the function to clear tags cache according to repo internal API
429 429 """
430 430 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
431 431 # in this case, setattr(repo, '_tagscache', None) or so isn't
432 432 # correct way to clear tags cache, because existing code paths
433 433 # expect _tagscache to be a structured object.
434 434 def clearcache():
435 435 # _tagscache has been filteredpropertycache since 2.5 (or
436 436 # 98c867ac1330), and delattr() can't work in such case
437 437 if b'_tagscache' in vars(repo):
438 438 del repo.__dict__[b'_tagscache']
439 439 return clearcache
440 440
441 441 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
442 442 if repotags: # since 1.4 (or 5614a628d173)
443 443 return lambda : repotags.set(None)
444 444
445 445 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
446 446 if repotagscache: # since 0.6 (or d7df759d0e97)
447 447 return lambda : repotagscache.set(None)
448 448
449 449 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
450 450 # this point, but it isn't so problematic, because:
451 451 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
452 452 # in perftags() causes failure soon
453 453 # - perf.py itself has been available since 1.1 (or eb240755386d)
454 454 raise error.Abort((b"tags API of this hg command is unknown"))
455 455
456 456 # utilities to clear cache
457 457
458 458 def clearfilecache(repo, attrname):
459 459 unfi = repo.unfiltered()
460 460 if attrname in vars(unfi):
461 461 delattr(unfi, attrname)
462 462 unfi._filecache.pop(attrname, None)
463 463
464 464 # perf commands
465 465
466 466 @command(b'perfwalk', formatteropts)
467 467 def perfwalk(ui, repo, *pats, **opts):
468 468 opts = _byteskwargs(opts)
469 469 timer, fm = gettimer(ui, opts)
470 470 m = scmutil.match(repo[None], pats, {})
471 471 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
472 472 ignored=False))))
473 473 fm.end()
474 474
475 475 @command(b'perfannotate', formatteropts)
476 476 def perfannotate(ui, repo, f, **opts):
477 477 opts = _byteskwargs(opts)
478 478 timer, fm = gettimer(ui, opts)
479 479 fc = repo[b'.'][f]
480 480 timer(lambda: len(fc.annotate(True)))
481 481 fm.end()
482 482
483 483 @command(b'perfstatus',
484 484 [(b'u', b'unknown', False,
485 485 b'ask status to look for unknown files')] + formatteropts)
486 486 def perfstatus(ui, repo, **opts):
487 487 opts = _byteskwargs(opts)
488 488 #m = match.always(repo.root, repo.getcwd())
489 489 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
490 490 # False))))
491 491 timer, fm = gettimer(ui, opts)
492 492 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
493 493 fm.end()
494 494
495 495 @command(b'perfaddremove', formatteropts)
496 496 def perfaddremove(ui, repo, **opts):
497 497 opts = _byteskwargs(opts)
498 498 timer, fm = gettimer(ui, opts)
499 499 try:
500 500 oldquiet = repo.ui.quiet
501 501 repo.ui.quiet = True
502 502 matcher = scmutil.match(repo[None])
503 503 opts[b'dry_run'] = True
504 504 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
505 505 finally:
506 506 repo.ui.quiet = oldquiet
507 507 fm.end()
508 508
509 509 def clearcaches(cl):
510 510 # behave somewhat consistently across internal API changes
511 511 if util.safehasattr(cl, b'clearcaches'):
512 512 cl.clearcaches()
513 513 elif util.safehasattr(cl, b'_nodecache'):
514 514 from mercurial.node import nullid, nullrev
515 515 cl._nodecache = {nullid: nullrev}
516 516 cl._nodepos = None
517 517
518 518 @command(b'perfheads', formatteropts)
519 519 def perfheads(ui, repo, **opts):
520 520 opts = _byteskwargs(opts)
521 521 timer, fm = gettimer(ui, opts)
522 522 cl = repo.changelog
523 523 def d():
524 524 len(cl.headrevs())
525 525 clearcaches(cl)
526 526 timer(d)
527 527 fm.end()
528 528
529 529 @command(b'perftags', formatteropts)
530 530 def perftags(ui, repo, **opts):
531 531 import mercurial.changelog
532 532 import mercurial.manifest
533 533
534 534 opts = _byteskwargs(opts)
535 535 timer, fm = gettimer(ui, opts)
536 536 svfs = getsvfs(repo)
537 537 repocleartagscache = repocleartagscachefunc(repo)
538 538 def t():
539 539 repo.changelog = mercurial.changelog.changelog(svfs)
540 540 rootmanifest = mercurial.manifest.manifestrevlog(svfs)
541 541 repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo,
542 542 rootmanifest)
543 543 repocleartagscache()
544 544 return len(repo.tags())
545 545 timer(t)
546 546 fm.end()
547 547
548 548 @command(b'perfancestors', formatteropts)
549 549 def perfancestors(ui, repo, **opts):
550 550 opts = _byteskwargs(opts)
551 551 timer, fm = gettimer(ui, opts)
552 552 heads = repo.changelog.headrevs()
553 553 def d():
554 554 for a in repo.changelog.ancestors(heads):
555 555 pass
556 556 timer(d)
557 557 fm.end()
558 558
559 559 @command(b'perfancestorset', formatteropts)
560 560 def perfancestorset(ui, repo, revset, **opts):
561 561 opts = _byteskwargs(opts)
562 562 timer, fm = gettimer(ui, opts)
563 563 revs = repo.revs(revset)
564 564 heads = repo.changelog.headrevs()
565 565 def d():
566 566 s = repo.changelog.ancestors(heads)
567 567 for rev in revs:
568 568 rev in s
569 569 timer(d)
570 570 fm.end()
571 571
572 572 @command(b'perfbookmarks', formatteropts)
573 573 def perfbookmarks(ui, repo, **opts):
574 574 """benchmark parsing bookmarks from disk to memory"""
575 575 opts = _byteskwargs(opts)
576 576 timer, fm = gettimer(ui, opts)
577 577 def d():
578 578 clearfilecache(repo, b'_bookmarks')
579 579 repo._bookmarks
580 580 timer(d)
581 581 fm.end()
582 582
583 583 @command(b'perfbundleread', formatteropts, b'BUNDLE')
584 584 def perfbundleread(ui, repo, bundlepath, **opts):
585 585 """Benchmark reading of bundle files.
586 586
587 587 This command is meant to isolate the I/O part of bundle reading as
588 588 much as possible.
589 589 """
590 590 from mercurial import (
591 591 bundle2,
592 592 exchange,
593 593 streamclone,
594 594 )
595 595
596 596 opts = _byteskwargs(opts)
597 597
598 598 def makebench(fn):
599 599 def run():
600 600 with open(bundlepath, b'rb') as fh:
601 601 bundle = exchange.readbundle(ui, fh, bundlepath)
602 602 fn(bundle)
603 603
604 604 return run
605 605
606 606 def makereadnbytes(size):
607 607 def run():
608 608 with open(bundlepath, b'rb') as fh:
609 609 bundle = exchange.readbundle(ui, fh, bundlepath)
610 610 while bundle.read(size):
611 611 pass
612 612
613 613 return run
614 614
615 615 def makestdioread(size):
616 616 def run():
617 617 with open(bundlepath, b'rb') as fh:
618 618 while fh.read(size):
619 619 pass
620 620
621 621 return run
622 622
623 623 # bundle1
624 624
625 625 def deltaiter(bundle):
626 626 for delta in bundle.deltaiter():
627 627 pass
628 628
629 629 def iterchunks(bundle):
630 630 for chunk in bundle.getchunks():
631 631 pass
632 632
633 633 # bundle2
634 634
635 635 def forwardchunks(bundle):
636 636 for chunk in bundle._forwardchunks():
637 637 pass
638 638
639 639 def iterparts(bundle):
640 640 for part in bundle.iterparts():
641 641 pass
642 642
643 643 def iterpartsseekable(bundle):
644 644 for part in bundle.iterparts(seekable=True):
645 645 pass
646 646
647 647 def seek(bundle):
648 648 for part in bundle.iterparts(seekable=True):
649 649 part.seek(0, os.SEEK_END)
650 650
651 651 def makepartreadnbytes(size):
652 652 def run():
653 653 with open(bundlepath, b'rb') as fh:
654 654 bundle = exchange.readbundle(ui, fh, bundlepath)
655 655 for part in bundle.iterparts():
656 656 while part.read(size):
657 657 pass
658 658
659 659 return run
660 660
661 661 benches = [
662 662 (makestdioread(8192), b'read(8k)'),
663 663 (makestdioread(16384), b'read(16k)'),
664 664 (makestdioread(32768), b'read(32k)'),
665 665 (makestdioread(131072), b'read(128k)'),
666 666 ]
667 667
668 668 with open(bundlepath, b'rb') as fh:
669 669 bundle = exchange.readbundle(ui, fh, bundlepath)
670 670
671 671 if isinstance(bundle, changegroup.cg1unpacker):
672 672 benches.extend([
673 673 (makebench(deltaiter), b'cg1 deltaiter()'),
674 674 (makebench(iterchunks), b'cg1 getchunks()'),
675 675 (makereadnbytes(8192), b'cg1 read(8k)'),
676 676 (makereadnbytes(16384), b'cg1 read(16k)'),
677 677 (makereadnbytes(32768), b'cg1 read(32k)'),
678 678 (makereadnbytes(131072), b'cg1 read(128k)'),
679 679 ])
680 680 elif isinstance(bundle, bundle2.unbundle20):
681 681 benches.extend([
682 682 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
683 683 (makebench(iterparts), b'bundle2 iterparts()'),
684 684 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
685 685 (makebench(seek), b'bundle2 part seek()'),
686 686 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
687 687 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
688 688 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
689 689 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
690 690 ])
691 691 elif isinstance(bundle, streamclone.streamcloneapplier):
692 692 raise error.Abort(b'stream clone bundles not supported')
693 693 else:
694 694 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
695 695
696 696 for fn, title in benches:
697 697 timer, fm = gettimer(ui, opts)
698 698 timer(fn, title=title)
699 699 fm.end()
700 700
701 701 @command(b'perfchangegroupchangelog', formatteropts +
702 702 [(b'', b'version', b'02', b'changegroup version'),
703 703 (b'r', b'rev', b'', b'revisions to add to changegroup')])
704 704 def perfchangegroupchangelog(ui, repo, version=b'02', rev=None, **opts):
705 705 """Benchmark producing a changelog group for a changegroup.
706 706
707 707 This measures the time spent processing the changelog during a
708 708 bundle operation. This occurs during `hg bundle` and on a server
709 709 processing a `getbundle` wire protocol request (this serves clones
710 710 and pulls).
711 711
712 712 By default, all revisions are added to the changegroup.
713 713 """
714 714 opts = _byteskwargs(opts)
715 715 cl = repo.changelog
716 716 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
717 717 bundler = changegroup.getbundler(version, repo)
718 718
719 719 def d():
720 720 state, chunks = bundler._generatechangelog(cl, nodes)
721 721 for chunk in chunks:
722 722 pass
723 723
724 724 timer, fm = gettimer(ui, opts)
725 725
726 726 # Terminal printing can interfere with timing. So disable it.
727 727 with ui.configoverride({(b'progress', b'disable'): True}):
728 728 timer(d)
729 729
730 730 fm.end()
731 731
732 732 @command(b'perfdirs', formatteropts)
733 733 def perfdirs(ui, repo, **opts):
734 734 opts = _byteskwargs(opts)
735 735 timer, fm = gettimer(ui, opts)
736 736 dirstate = repo.dirstate
737 737 b'a' in dirstate
738 738 def d():
739 739 dirstate.hasdir(b'a')
740 740 del dirstate._map._dirs
741 741 timer(d)
742 742 fm.end()
743 743
744 744 @command(b'perfdirstate', formatteropts)
745 745 def perfdirstate(ui, repo, **opts):
746 746 opts = _byteskwargs(opts)
747 747 timer, fm = gettimer(ui, opts)
748 748 b"a" in repo.dirstate
749 749 def d():
750 750 repo.dirstate.invalidate()
751 751 b"a" in repo.dirstate
752 752 timer(d)
753 753 fm.end()
754 754
755 755 @command(b'perfdirstatedirs', formatteropts)
756 756 def perfdirstatedirs(ui, repo, **opts):
757 757 opts = _byteskwargs(opts)
758 758 timer, fm = gettimer(ui, opts)
759 759 b"a" in repo.dirstate
760 760 def d():
761 761 repo.dirstate.hasdir(b"a")
762 762 del repo.dirstate._map._dirs
763 763 timer(d)
764 764 fm.end()
765 765
766 766 @command(b'perfdirstatefoldmap', formatteropts)
767 767 def perfdirstatefoldmap(ui, repo, **opts):
768 768 opts = _byteskwargs(opts)
769 769 timer, fm = gettimer(ui, opts)
770 770 dirstate = repo.dirstate
771 771 b'a' in dirstate
772 772 def d():
773 773 dirstate._map.filefoldmap.get(b'a')
774 774 del dirstate._map.filefoldmap
775 775 timer(d)
776 776 fm.end()
777 777
778 778 @command(b'perfdirfoldmap', formatteropts)
779 779 def perfdirfoldmap(ui, repo, **opts):
780 780 opts = _byteskwargs(opts)
781 781 timer, fm = gettimer(ui, opts)
782 782 dirstate = repo.dirstate
783 783 b'a' in dirstate
784 784 def d():
785 785 dirstate._map.dirfoldmap.get(b'a')
786 786 del dirstate._map.dirfoldmap
787 787 del dirstate._map._dirs
788 788 timer(d)
789 789 fm.end()
790 790
791 791 @command(b'perfdirstatewrite', formatteropts)
792 792 def perfdirstatewrite(ui, repo, **opts):
793 793 opts = _byteskwargs(opts)
794 794 timer, fm = gettimer(ui, opts)
795 795 ds = repo.dirstate
796 796 b"a" in ds
797 797 def d():
798 798 ds._dirty = True
799 799 ds.write(repo.currenttransaction())
800 800 timer(d)
801 801 fm.end()
802 802
803 803 @command(b'perfmergecalculate',
804 804 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
805 805 def perfmergecalculate(ui, repo, rev, **opts):
806 806 opts = _byteskwargs(opts)
807 807 timer, fm = gettimer(ui, opts)
808 808 wctx = repo[None]
809 809 rctx = scmutil.revsingle(repo, rev, rev)
810 810 ancestor = wctx.ancestor(rctx)
811 811 # we don't want working dir files to be stat'd in the benchmark, so prime
812 812 # that cache
813 813 wctx.dirty()
814 814 def d():
815 815 # acceptremote is True because we don't want prompts in the middle of
816 816 # our benchmark
817 817 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
818 818 acceptremote=True, followcopies=True)
819 819 timer(d)
820 820 fm.end()
821 821
822 822 @command(b'perfpathcopies', [], b"REV REV")
823 823 def perfpathcopies(ui, repo, rev1, rev2, **opts):
824 824 opts = _byteskwargs(opts)
825 825 timer, fm = gettimer(ui, opts)
826 826 ctx1 = scmutil.revsingle(repo, rev1, rev1)
827 827 ctx2 = scmutil.revsingle(repo, rev2, rev2)
828 828 def d():
829 829 copies.pathcopies(ctx1, ctx2)
830 830 timer(d)
831 831 fm.end()
832 832
833 833 @command(b'perfphases',
834 834 [(b'', b'full', False, b'include file reading time too'),
835 835 ], b"")
836 836 def perfphases(ui, repo, **opts):
837 837 """benchmark phasesets computation"""
838 838 opts = _byteskwargs(opts)
839 839 timer, fm = gettimer(ui, opts)
840 840 _phases = repo._phasecache
841 841 full = opts.get(b'full')
842 842 def d():
843 843 phases = _phases
844 844 if full:
845 845 clearfilecache(repo, b'_phasecache')
846 846 phases = repo._phasecache
847 847 phases.invalidate()
848 848 phases.loadphaserevs(repo)
849 849 timer(d)
850 850 fm.end()
851 851
852 852 @command(b'perfphasesremote',
853 853 [], b"[DEST]")
854 854 def perfphasesremote(ui, repo, dest=None, **opts):
855 855 """benchmark time needed to analyse phases of the remote server"""
856 856 from mercurial.node import (
857 857 bin,
858 858 )
859 859 from mercurial import (
860 860 exchange,
861 861 hg,
862 862 phases,
863 863 )
864 864 opts = _byteskwargs(opts)
865 865 timer, fm = gettimer(ui, opts)
866 866
867 867 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
868 868 if not path:
869 869 raise error.Abort((b'default repository not configured!'),
870 870 hint=(b"see 'hg help config.paths'"))
871 871 dest = path.pushloc or path.loc
872 872 branches = (path.branch, opts.get(b'branch') or [])
873 873 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
874 874 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
875 875 other = hg.peer(repo, opts, dest)
876 876
877 877 # easier to perform discovery through the operation
878 878 op = exchange.pushoperation(repo, other)
879 879 exchange._pushdiscoverychangeset(op)
880 880
881 881 remotesubset = op.fallbackheads
882 882
883 883 with other.commandexecutor() as e:
884 884 remotephases = e.callcommand(b'listkeys',
885 885 {b'namespace': b'phases'}).result()
886 886 del other
887 887 publishing = remotephases.get(b'publishing', False)
888 888 if publishing:
889 889 ui.status((b'publishing: yes\n'))
890 890 else:
891 891 ui.status((b'publishing: no\n'))
892 892
893 893 nodemap = repo.changelog.nodemap
894 894 nonpublishroots = 0
895 895 for nhex, phase in remotephases.iteritems():
896 896 if nhex == b'publishing': # ignore data related to publish option
897 897 continue
898 898 node = bin(nhex)
899 899 if node in nodemap and int(phase):
900 900 nonpublishroots += 1
901 901 ui.status((b'number of roots: %d\n') % len(remotephases))
902 902 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
903 903 def d():
904 904 phases.remotephasessummary(repo,
905 905 remotesubset,
906 906 remotephases)
907 907 timer(d)
908 908 fm.end()
909 909
910 910 @command(b'perfmanifest',[
911 911 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
912 912 (b'', b'clear-disk', False, b'clear on-disk caches too'),
913 913 ] + formatteropts, b'REV|NODE')
914 914 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
915 915 """benchmark the time to read a manifest from disk and return a usable
916 916 dict-like object
917 917
918 918 Manifest caches are cleared before retrieval."""
919 919 opts = _byteskwargs(opts)
920 920 timer, fm = gettimer(ui, opts)
921 921 if not manifest_rev:
922 922 ctx = scmutil.revsingle(repo, rev, rev)
923 923 t = ctx.manifestnode()
924 924 else:
925 925 from mercurial.node import bin
926 926
927 927 if len(rev) == 40:
928 928 t = bin(rev)
929 929 else:
930 930 try:
931 931 rev = int(rev)
932 932
933 933 if util.safehasattr(repo.manifestlog, b'getstorage'):
934 934 t = repo.manifestlog.getstorage(b'').node(rev)
935 935 else:
936 936 t = repo.manifestlog._revlog.lookup(rev)
937 937 except ValueError:
938 938 raise error.Abort(b'manifest revision must be integer or full '
939 939 b'node')
940 940 def d():
941 941 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
942 942 repo.manifestlog[t].read()
943 943 timer(d)
944 944 fm.end()
945 945
946 946 @command(b'perfchangeset', formatteropts)
947 947 def perfchangeset(ui, repo, rev, **opts):
948 948 opts = _byteskwargs(opts)
949 949 timer, fm = gettimer(ui, opts)
950 950 n = scmutil.revsingle(repo, rev).node()
951 951 def d():
952 952 repo.changelog.read(n)
953 953 #repo.changelog._cache = None
954 954 timer(d)
955 955 fm.end()
956 956
957 957 @command(b'perfindex', formatteropts)
958 958 def perfindex(ui, repo, **opts):
959 959 import mercurial.revlog
960 960 opts = _byteskwargs(opts)
961 961 timer, fm = gettimer(ui, opts)
962 962 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
963 963 n = repo[b"tip"].node()
964 964 svfs = getsvfs(repo)
965 965 def d():
966 966 cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
967 967 cl.rev(n)
968 968 timer(d)
969 969 fm.end()
970 970
971 971 @command(b'perfstartup', formatteropts)
972 972 def perfstartup(ui, repo, **opts):
973 973 opts = _byteskwargs(opts)
974 974 timer, fm = gettimer(ui, opts)
975 975 def d():
976 976 if os.name != r'nt':
977 977 os.system(b"HGRCPATH= %s version -q > /dev/null" %
978 978 fsencode(sys.argv[0]))
979 979 else:
980 980 os.environ[r'HGRCPATH'] = r' '
981 981 os.system(r"%s version -q > NUL" % sys.argv[0])
982 982 timer(d)
983 983 fm.end()
984 984
985 985 @command(b'perfparents', formatteropts)
986 986 def perfparents(ui, repo, **opts):
987 987 opts = _byteskwargs(opts)
988 988 timer, fm = gettimer(ui, opts)
989 989 # control the number of commits perfparents iterates over
990 990 # experimental config: perf.parentscount
991 991 count = getint(ui, b"perf", b"parentscount", 1000)
992 992 if len(repo.changelog) < count:
993 993 raise error.Abort(b"repo needs %d commits for this test" % count)
994 994 repo = repo.unfiltered()
995 995 nl = [repo.changelog.node(i) for i in _xrange(count)]
996 996 def d():
997 997 for n in nl:
998 998 repo.changelog.parents(n)
999 999 timer(d)
1000 1000 fm.end()
1001 1001
1002 1002 @command(b'perfctxfiles', formatteropts)
1003 1003 def perfctxfiles(ui, repo, x, **opts):
1004 1004 opts = _byteskwargs(opts)
1005 1005 x = int(x)
1006 1006 timer, fm = gettimer(ui, opts)
1007 1007 def d():
1008 1008 len(repo[x].files())
1009 1009 timer(d)
1010 1010 fm.end()
1011 1011
1012 1012 @command(b'perfrawfiles', formatteropts)
1013 1013 def perfrawfiles(ui, repo, x, **opts):
1014 1014 opts = _byteskwargs(opts)
1015 1015 x = int(x)
1016 1016 timer, fm = gettimer(ui, opts)
1017 1017 cl = repo.changelog
1018 1018 def d():
1019 1019 len(cl.read(x)[3])
1020 1020 timer(d)
1021 1021 fm.end()
1022 1022
1023 1023 @command(b'perflookup', formatteropts)
1024 1024 def perflookup(ui, repo, rev, **opts):
1025 1025 opts = _byteskwargs(opts)
1026 1026 timer, fm = gettimer(ui, opts)
1027 1027 timer(lambda: len(repo.lookup(rev)))
1028 1028 fm.end()
1029 1029
1030 1030 @command(b'perflinelogedits',
1031 1031 [(b'n', b'edits', 10000, b'number of edits'),
1032 1032 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1033 1033 ], norepo=True)
1034 1034 def perflinelogedits(ui, **opts):
1035 1035 from mercurial import linelog
1036 1036
1037 1037 opts = _byteskwargs(opts)
1038 1038
1039 1039 edits = opts[b'edits']
1040 1040 maxhunklines = opts[b'max_hunk_lines']
1041 1041
1042 1042 maxb1 = 100000
1043 1043 random.seed(0)
1044 1044 randint = random.randint
1045 1045 currentlines = 0
1046 1046 arglist = []
1047 1047 for rev in _xrange(edits):
1048 1048 a1 = randint(0, currentlines)
1049 1049 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1050 1050 b1 = randint(0, maxb1)
1051 1051 b2 = randint(b1, b1 + maxhunklines)
1052 1052 currentlines += (b2 - b1) - (a2 - a1)
1053 1053 arglist.append((rev, a1, a2, b1, b2))
1054 1054
1055 1055 def d():
1056 1056 ll = linelog.linelog()
1057 1057 for args in arglist:
1058 1058 ll.replacelines(*args)
1059 1059
1060 1060 timer, fm = gettimer(ui, opts)
1061 1061 timer(d)
1062 1062 fm.end()
1063 1063
1064 1064 @command(b'perfrevrange', formatteropts)
1065 1065 def perfrevrange(ui, repo, *specs, **opts):
1066 1066 opts = _byteskwargs(opts)
1067 1067 timer, fm = gettimer(ui, opts)
1068 1068 revrange = scmutil.revrange
1069 1069 timer(lambda: len(revrange(repo, specs)))
1070 1070 fm.end()
1071 1071
1072 1072 @command(b'perfnodelookup', formatteropts)
1073 1073 def perfnodelookup(ui, repo, rev, **opts):
1074 1074 opts = _byteskwargs(opts)
1075 1075 timer, fm = gettimer(ui, opts)
1076 1076 import mercurial.revlog
1077 1077 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1078 1078 n = scmutil.revsingle(repo, rev).node()
1079 1079 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1080 1080 def d():
1081 1081 cl.rev(n)
1082 1082 clearcaches(cl)
1083 1083 timer(d)
1084 1084 fm.end()
1085 1085
1086 1086 @command(b'perflog',
1087 1087 [(b'', b'rename', False, b'ask log to follow renames')
1088 1088 ] + formatteropts)
1089 1089 def perflog(ui, repo, rev=None, **opts):
1090 1090 opts = _byteskwargs(opts)
1091 1091 if rev is None:
1092 1092 rev=[]
1093 1093 timer, fm = gettimer(ui, opts)
1094 1094 ui.pushbuffer()
1095 1095 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1096 1096 copies=opts.get(b'rename')))
1097 1097 ui.popbuffer()
1098 1098 fm.end()
1099 1099
1100 1100 @command(b'perfmoonwalk', formatteropts)
1101 1101 def perfmoonwalk(ui, repo, **opts):
1102 1102 """benchmark walking the changelog backwards
1103 1103
1104 1104 This also loads the changelog data for each revision in the changelog.
1105 1105 """
1106 1106 opts = _byteskwargs(opts)
1107 1107 timer, fm = gettimer(ui, opts)
1108 1108 def moonwalk():
1109 1109 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1110 1110 ctx = repo[i]
1111 1111 ctx.branch() # read changelog data (in addition to the index)
1112 1112 timer(moonwalk)
1113 1113 fm.end()
1114 1114
1115 1115 @command(b'perftemplating',
1116 1116 [(b'r', b'rev', [], b'revisions to run the template on'),
1117 1117 ] + formatteropts)
1118 1118 def perftemplating(ui, repo, testedtemplate=None, **opts):
1119 1119 """test the rendering time of a given template"""
1120 1120 if makelogtemplater is None:
1121 1121 raise error.Abort((b"perftemplating not available with this Mercurial"),
1122 1122 hint=b"use 4.3 or later")
1123 1123
1124 1124 opts = _byteskwargs(opts)
1125 1125
1126 1126 nullui = ui.copy()
1127 1127 nullui.fout = open(os.devnull, r'wb')
1128 1128 nullui.disablepager()
1129 1129 revs = opts.get(b'rev')
1130 1130 if not revs:
1131 1131 revs = [b'all()']
1132 1132 revs = list(scmutil.revrange(repo, revs))
1133 1133
1134 1134 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1135 1135 b' {author|person}: {desc|firstline}\n')
1136 1136 if testedtemplate is None:
1137 1137 testedtemplate = defaulttemplate
1138 1138 displayer = makelogtemplater(nullui, repo, testedtemplate)
1139 1139 def format():
1140 1140 for r in revs:
1141 1141 ctx = repo[r]
1142 1142 displayer.show(ctx)
1143 1143 displayer.flush(ctx)
1144 1144
1145 1145 timer, fm = gettimer(ui, opts)
1146 1146 timer(format)
1147 1147 fm.end()
1148 1148
1149 1149 @command(b'perfcca', formatteropts)
1150 1150 def perfcca(ui, repo, **opts):
1151 1151 opts = _byteskwargs(opts)
1152 1152 timer, fm = gettimer(ui, opts)
1153 1153 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1154 1154 fm.end()
1155 1155
1156 1156 @command(b'perffncacheload', formatteropts)
1157 1157 def perffncacheload(ui, repo, **opts):
1158 1158 opts = _byteskwargs(opts)
1159 1159 timer, fm = gettimer(ui, opts)
1160 1160 s = repo.store
1161 1161 def d():
1162 1162 s.fncache._load()
1163 1163 timer(d)
1164 1164 fm.end()
1165 1165
1166 1166 @command(b'perffncachewrite', formatteropts)
1167 1167 def perffncachewrite(ui, repo, **opts):
1168 1168 opts = _byteskwargs(opts)
1169 1169 timer, fm = gettimer(ui, opts)
1170 1170 s = repo.store
1171 1171 lock = repo.lock()
1172 1172 s.fncache._load()
1173 1173 tr = repo.transaction(b'perffncachewrite')
1174 1174 tr.addbackup(b'fncache')
1175 1175 def d():
1176 1176 s.fncache._dirty = True
1177 1177 s.fncache.write(tr)
1178 1178 timer(d)
1179 1179 tr.close()
1180 1180 lock.release()
1181 1181 fm.end()
1182 1182
1183 1183 @command(b'perffncacheencode', formatteropts)
1184 1184 def perffncacheencode(ui, repo, **opts):
1185 1185 opts = _byteskwargs(opts)
1186 1186 timer, fm = gettimer(ui, opts)
1187 1187 s = repo.store
1188 1188 s.fncache._load()
1189 1189 def d():
1190 1190 for p in s.fncache.entries:
1191 1191 s.encode(p)
1192 1192 timer(d)
1193 1193 fm.end()
1194 1194
1195 1195 def _bdiffworker(q, blocks, xdiff, ready, done):
1196 1196 while not done.is_set():
1197 1197 pair = q.get()
1198 1198 while pair is not None:
1199 1199 if xdiff:
1200 1200 mdiff.bdiff.xdiffblocks(*pair)
1201 1201 elif blocks:
1202 1202 mdiff.bdiff.blocks(*pair)
1203 1203 else:
1204 1204 mdiff.textdiff(*pair)
1205 1205 q.task_done()
1206 1206 pair = q.get()
1207 1207 q.task_done() # for the None one
1208 1208 with ready:
1209 1209 ready.wait()
1210 1210
1211 1211 def _manifestrevision(repo, mnode):
1212 1212 ml = repo.manifestlog
1213 1213
1214 1214 if util.safehasattr(ml, b'getstorage'):
1215 1215 store = ml.getstorage(b'')
1216 1216 else:
1217 1217 store = ml._revlog
1218 1218
1219 1219 return store.revision(mnode)
1220 1220
1221 1221 @command(b'perfbdiff', revlogopts + formatteropts + [
1222 1222 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1223 1223 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1224 1224 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
1225 1225 (b'', b'blocks', False, b'test computing diffs into blocks'),
1226 1226 (b'', b'xdiff', False, b'use xdiff algorithm'),
1227 1227 ],
1228 1228
1229 1229 b'-c|-m|FILE REV')
1230 1230 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1231 1231 """benchmark a bdiff between revisions
1232 1232
1233 1233 By default, benchmark a bdiff between its delta parent and itself.
1234 1234
1235 1235 With ``--count``, benchmark bdiffs between delta parents and self for N
1236 1236 revisions starting at the specified revision.
1237 1237
1238 1238 With ``--alldata``, assume the requested revision is a changeset and
1239 1239 measure bdiffs for all changes related to that changeset (manifest
1240 1240 and filelogs).
1241 1241 """
1242 1242 opts = _byteskwargs(opts)
1243 1243
1244 1244 if opts[b'xdiff'] and not opts[b'blocks']:
1245 1245 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1246 1246
1247 1247 if opts[b'alldata']:
1248 1248 opts[b'changelog'] = True
1249 1249
1250 1250 if opts.get(b'changelog') or opts.get(b'manifest'):
1251 1251 file_, rev = None, file_
1252 1252 elif rev is None:
1253 1253 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1254 1254
1255 1255 blocks = opts[b'blocks']
1256 1256 xdiff = opts[b'xdiff']
1257 1257 textpairs = []
1258 1258
1259 1259 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1260 1260
1261 1261 startrev = r.rev(r.lookup(rev))
1262 1262 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1263 1263 if opts[b'alldata']:
1264 1264 # Load revisions associated with changeset.
1265 1265 ctx = repo[rev]
1266 1266 mtext = _manifestrevision(repo, ctx.manifestnode())
1267 1267 for pctx in ctx.parents():
1268 1268 pman = _manifestrevision(repo, pctx.manifestnode())
1269 1269 textpairs.append((pman, mtext))
1270 1270
1271 1271 # Load filelog revisions by iterating manifest delta.
1272 1272 man = ctx.manifest()
1273 1273 pman = ctx.p1().manifest()
1274 1274 for filename, change in pman.diff(man).items():
1275 1275 fctx = repo.file(filename)
1276 1276 f1 = fctx.revision(change[0][0] or -1)
1277 1277 f2 = fctx.revision(change[1][0] or -1)
1278 1278 textpairs.append((f1, f2))
1279 1279 else:
1280 1280 dp = r.deltaparent(rev)
1281 1281 textpairs.append((r.revision(dp), r.revision(rev)))
1282 1282
1283 1283 withthreads = threads > 0
1284 1284 if not withthreads:
1285 1285 def d():
1286 1286 for pair in textpairs:
1287 1287 if xdiff:
1288 1288 mdiff.bdiff.xdiffblocks(*pair)
1289 1289 elif blocks:
1290 1290 mdiff.bdiff.blocks(*pair)
1291 1291 else:
1292 1292 mdiff.textdiff(*pair)
1293 1293 else:
1294 1294 q = queue()
1295 1295 for i in _xrange(threads):
1296 1296 q.put(None)
1297 1297 ready = threading.Condition()
1298 1298 done = threading.Event()
1299 1299 for i in _xrange(threads):
1300 1300 threading.Thread(target=_bdiffworker,
1301 1301 args=(q, blocks, xdiff, ready, done)).start()
1302 1302 q.join()
1303 1303 def d():
1304 1304 for pair in textpairs:
1305 1305 q.put(pair)
1306 1306 for i in _xrange(threads):
1307 1307 q.put(None)
1308 1308 with ready:
1309 1309 ready.notify_all()
1310 1310 q.join()
1311 1311 timer, fm = gettimer(ui, opts)
1312 1312 timer(d)
1313 1313 fm.end()
1314 1314
1315 1315 if withthreads:
1316 1316 done.set()
1317 1317 for i in _xrange(threads):
1318 1318 q.put(None)
1319 1319 with ready:
1320 1320 ready.notify_all()
1321 1321
1322 1322 @command(b'perfunidiff', revlogopts + formatteropts + [
1323 1323 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1324 1324 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1325 1325 ], b'-c|-m|FILE REV')
1326 1326 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1327 1327 """benchmark a unified diff between revisions
1328 1328
1329 1329 This doesn't include any copy tracing - it's just a unified diff
1330 1330 of the texts.
1331 1331
1332 1332 By default, benchmark a diff between its delta parent and itself.
1333 1333
1334 1334 With ``--count``, benchmark diffs between delta parents and self for N
1335 1335 revisions starting at the specified revision.
1336 1336
1337 1337 With ``--alldata``, assume the requested revision is a changeset and
1338 1338 measure diffs for all changes related to that changeset (manifest
1339 1339 and filelogs).
1340 1340 """
1341 1341 opts = _byteskwargs(opts)
1342 1342 if opts[b'alldata']:
1343 1343 opts[b'changelog'] = True
1344 1344
1345 1345 if opts.get(b'changelog') or opts.get(b'manifest'):
1346 1346 file_, rev = None, file_
1347 1347 elif rev is None:
1348 1348 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1349 1349
1350 1350 textpairs = []
1351 1351
1352 1352 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1353 1353
1354 1354 startrev = r.rev(r.lookup(rev))
1355 1355 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1356 1356 if opts[b'alldata']:
1357 1357 # Load revisions associated with changeset.
1358 1358 ctx = repo[rev]
1359 1359 mtext = _manifestrevision(repo, ctx.manifestnode())
1360 1360 for pctx in ctx.parents():
1361 1361 pman = _manifestrevision(repo, pctx.manifestnode())
1362 1362 textpairs.append((pman, mtext))
1363 1363
1364 1364 # Load filelog revisions by iterating manifest delta.
1365 1365 man = ctx.manifest()
1366 1366 pman = ctx.p1().manifest()
1367 1367 for filename, change in pman.diff(man).items():
1368 1368 fctx = repo.file(filename)
1369 1369 f1 = fctx.revision(change[0][0] or -1)
1370 1370 f2 = fctx.revision(change[1][0] or -1)
1371 1371 textpairs.append((f1, f2))
1372 1372 else:
1373 1373 dp = r.deltaparent(rev)
1374 1374 textpairs.append((r.revision(dp), r.revision(rev)))
1375 1375
1376 1376 def d():
1377 1377 for left, right in textpairs:
1378 1378 # The date strings don't matter, so we pass empty strings.
1379 1379 headerlines, hunks = mdiff.unidiff(
1380 1380 left, b'', right, b'', b'left', b'right', binary=False)
1381 1381 # consume iterators in roughly the way patch.py does
1382 1382 b'\n'.join(headerlines)
1383 1383 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1384 1384 timer, fm = gettimer(ui, opts)
1385 1385 timer(d)
1386 1386 fm.end()
1387 1387
1388 1388 @command(b'perfdiffwd', formatteropts)
1389 1389 def perfdiffwd(ui, repo, **opts):
1390 1390 """Profile diff of working directory changes"""
1391 1391 opts = _byteskwargs(opts)
1392 1392 timer, fm = gettimer(ui, opts)
1393 1393 options = {
1394 1394 'w': 'ignore_all_space',
1395 1395 'b': 'ignore_space_change',
1396 1396 'B': 'ignore_blank_lines',
1397 1397 }
1398 1398
1399 1399 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1400 1400 opts = dict((options[c], b'1') for c in diffopt)
1401 1401 def d():
1402 1402 ui.pushbuffer()
1403 1403 commands.diff(ui, repo, **opts)
1404 1404 ui.popbuffer()
1405 1405 diffopt = diffopt.encode('ascii')
1406 1406 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1407 1407 timer(d, title)
1408 1408 fm.end()
1409 1409
1410 1410 @command(b'perfrevlogindex', revlogopts + formatteropts,
1411 1411 b'-c|-m|FILE')
1412 1412 def perfrevlogindex(ui, repo, file_=None, **opts):
1413 1413 """Benchmark operations against a revlog index.
1414 1414
1415 1415 This tests constructing a revlog instance, reading index data,
1416 1416 parsing index data, and performing various operations related to
1417 1417 index data.
1418 1418 """
1419 1419
1420 1420 opts = _byteskwargs(opts)
1421 1421
1422 1422 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1423 1423
1424 1424 opener = getattr(rl, 'opener') # trick linter
1425 1425 indexfile = rl.indexfile
1426 1426 data = opener.read(indexfile)
1427 1427
1428 1428 header = struct.unpack(b'>I', data[0:4])[0]
1429 1429 version = header & 0xFFFF
1430 1430 if version == 1:
1431 1431 revlogio = revlog.revlogio()
1432 1432 inline = header & (1 << 16)
1433 1433 else:
1434 1434 raise error.Abort((b'unsupported revlog version: %d') % version)
1435 1435
1436 1436 rllen = len(rl)
1437 1437
1438 1438 node0 = rl.node(0)
1439 1439 node25 = rl.node(rllen // 4)
1440 1440 node50 = rl.node(rllen // 2)
1441 1441 node75 = rl.node(rllen // 4 * 3)
1442 1442 node100 = rl.node(rllen - 1)
1443 1443
1444 1444 allrevs = range(rllen)
1445 1445 allrevsrev = list(reversed(allrevs))
1446 1446 allnodes = [rl.node(rev) for rev in range(rllen)]
1447 1447 allnodesrev = list(reversed(allnodes))
1448 1448
1449 1449 def constructor():
1450 1450 revlog.revlog(opener, indexfile)
1451 1451
1452 1452 def read():
1453 1453 with opener(indexfile) as fh:
1454 1454 fh.read()
1455 1455
1456 1456 def parseindex():
1457 1457 revlogio.parseindex(data, inline)
1458 1458
1459 1459 def getentry(revornode):
1460 1460 index = revlogio.parseindex(data, inline)[0]
1461 1461 index[revornode]
1462 1462
1463 1463 def getentries(revs, count=1):
1464 1464 index = revlogio.parseindex(data, inline)[0]
1465 1465
1466 1466 for i in range(count):
1467 1467 for rev in revs:
1468 1468 index[rev]
1469 1469
1470 1470 def resolvenode(node):
1471 1471 nodemap = revlogio.parseindex(data, inline)[1]
1472 1472 # This only works for the C code.
1473 1473 if nodemap is None:
1474 1474 return
1475 1475
1476 1476 try:
1477 1477 nodemap[node]
1478 1478 except error.RevlogError:
1479 1479 pass
1480 1480
1481 1481 def resolvenodes(nodes, count=1):
1482 1482 nodemap = revlogio.parseindex(data, inline)[1]
1483 1483 if nodemap is None:
1484 1484 return
1485 1485
1486 1486 for i in range(count):
1487 1487 for node in nodes:
1488 1488 try:
1489 1489 nodemap[node]
1490 1490 except error.RevlogError:
1491 1491 pass
1492 1492
1493 1493 benches = [
1494 1494 (constructor, b'revlog constructor'),
1495 1495 (read, b'read'),
1496 1496 (parseindex, b'create index object'),
1497 1497 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1498 1498 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1499 1499 (lambda: resolvenode(node0), b'look up node at rev 0'),
1500 1500 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1501 1501 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1502 1502 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1503 1503 (lambda: resolvenode(node100), b'look up node at tip'),
1504 1504 # 2x variation is to measure caching impact.
1505 1505 (lambda: resolvenodes(allnodes),
1506 1506 b'look up all nodes (forward)'),
1507 1507 (lambda: resolvenodes(allnodes, 2),
1508 1508 b'look up all nodes 2x (forward)'),
1509 1509 (lambda: resolvenodes(allnodesrev),
1510 1510 b'look up all nodes (reverse)'),
1511 1511 (lambda: resolvenodes(allnodesrev, 2),
1512 1512 b'look up all nodes 2x (reverse)'),
1513 1513 (lambda: getentries(allrevs),
1514 1514 b'retrieve all index entries (forward)'),
1515 1515 (lambda: getentries(allrevs, 2),
1516 1516 b'retrieve all index entries 2x (forward)'),
1517 1517 (lambda: getentries(allrevsrev),
1518 1518 b'retrieve all index entries (reverse)'),
1519 1519 (lambda: getentries(allrevsrev, 2),
1520 1520 b'retrieve all index entries 2x (reverse)'),
1521 1521 ]
1522 1522
1523 1523 for fn, title in benches:
1524 1524 timer, fm = gettimer(ui, opts)
1525 1525 timer(fn, title=title)
1526 1526 fm.end()
1527 1527
1528 1528 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1529 1529 [(b'd', b'dist', 100, b'distance between the revisions'),
1530 1530 (b's', b'startrev', 0, b'revision to start reading at'),
1531 1531 (b'', b'reverse', False, b'read in reverse')],
1532 1532 b'-c|-m|FILE')
1533 1533 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1534 1534 **opts):
1535 1535 """Benchmark reading a series of revisions from a revlog.
1536 1536
1537 1537 By default, we read every ``-d/--dist`` revision from 0 to tip of
1538 1538 the specified revlog.
1539 1539
1540 1540 The start revision can be defined via ``-s/--startrev``.
1541 1541 """
1542 1542 opts = _byteskwargs(opts)
1543 1543
1544 1544 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1545 1545 rllen = getlen(ui)(rl)
1546 1546
1547 1547 if startrev < 0:
1548 1548 startrev = rllen + startrev
1549 1549
1550 1550 def d():
1551 1551 rl.clearcaches()
1552 1552
1553 1553 beginrev = startrev
1554 1554 endrev = rllen
1555 1555 dist = opts[b'dist']
1556 1556
1557 1557 if reverse:
1558 1558 beginrev, endrev = endrev - 1, beginrev - 1
1559 1559 dist = -1 * dist
1560 1560
1561 1561 for x in _xrange(beginrev, endrev, dist):
1562 1562 # Old revisions don't support passing int.
1563 1563 n = rl.node(x)
1564 1564 rl.revision(n)
1565 1565
1566 1566 timer, fm = gettimer(ui, opts)
1567 1567 timer(d)
1568 1568 fm.end()
1569 1569
1570 1570 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1571 1571 [(b's', b'startrev', 1000, b'revision to start writing at'),
1572 1572 (b'', b'stoprev', -1, b'last revision to write'),
1573 1573 (b'', b'count', 3, b'number of passes to perform'),
1574 1574 (b'', b'details', False, b'print timing for every revision tested'),
1575 1575 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
1576 1576 ],
1577 1577 b'-c|-m|FILE')
1578 1578 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1579 1579 """Benchmark writing a series of revisions to a revlog.
1580 1580
1581 1581 Possible source values are:
1582 1582 * `full`: add from a full text (default).
1583 1583 * `parent-1`: add from a delta to the first parent
1584 * `parent-2`: add from a delta to the second parent if it exists
1585 (use a delta from the first parent otherwise)
1584 1586 """
1585 1587 opts = _byteskwargs(opts)
1586 1588
1587 1589 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1588 1590 rllen = getlen(ui)(rl)
1589 1591 if startrev < 0:
1590 1592 startrev = rllen + startrev
1591 1593 if stoprev < 0:
1592 1594 stoprev = rllen + stoprev
1593 1595
1594 1596 source = opts['source']
1595 validsource = (b'full', b'parent-1')
1597 validsource = (b'full', b'parent-1', b'parent-2')
1596 1598 if source not in validsource:
1597 1599 raise error.Abort('invalid source type: %s' % source)
1598 1600
1599 1601 ### actually gather results
1600 1602 count = opts['count']
1601 1603 if count <= 0:
1602 1604 raise error.Abort('invalid run count: %d' % count)
1603 1605 allresults = []
1604 1606 for c in range(count):
1605 1607 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1)
1606 1608 allresults.append(timing)
1607 1609
1608 1610 ### consolidate the results in a single list
1609 1611 results = []
1610 1612 for idx, (rev, t) in enumerate(allresults[0]):
1611 1613 ts = [t]
1612 1614 for other in allresults[1:]:
1613 1615 orev, ot = other[idx]
1614 1616 assert orev == rev
1615 1617 ts.append(ot)
1616 1618 results.append((rev, ts))
1617 1619 resultcount = len(results)
1618 1620
1619 1621 ### Compute and display relevant statistics
1620 1622
1621 1623 # get a formatter
1622 1624 fm = ui.formatter(b'perf', opts)
1623 1625 displayall = ui.configbool(b"perf", b"all-timing", False)
1624 1626
1625 1627 # print individual details if requested
1626 1628 if opts['details']:
1627 1629 for idx, item in enumerate(results, 1):
1628 1630 rev, data = item
1629 1631             title = 'revision #%d of %d, rev %d' % (idx, resultcount, rev)
1630 1632 formatone(fm, data, title=title, displayall=displayall)
1631 1633
1632 1634 # sorts results by median time
1633 1635 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1634 1636     # list of (name, index) to display
1635 1637 relevants = [
1636 1638 ("min", 0),
1637 1639 ("10%", resultcount * 10 // 100),
1638 1640 ("25%", resultcount * 25 // 100),
1639 1641         ("50%", resultcount * 50 // 100),
1640 1642 ("75%", resultcount * 75 // 100),
1641 1643 ("90%", resultcount * 90 // 100),
1642 1644 ("95%", resultcount * 95 // 100),
1643 1645 ("99%", resultcount * 99 // 100),
1644 1646 ("max", -1),
1645 1647 ]
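    # each index above points into `results`, which was just sorted by median
    # time, so e.g. the "90%" row reports the revision whose median write time
    # sits at the 90th percentile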
1646 1648 if not ui.quiet:
1647 1649 for name, idx in relevants:
1648 1650 data = results[idx]
1649 1651 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1650 1652 formatone(fm, data[1], title=title, displayall=displayall)
1651 1653
1652 1654     # XXX summing that many floats will not be very precise; we ignore this
1653 1655     # fact for now
1654 1656 totaltime = []
1655 1657 for item in allresults:
1656 1658 totaltime.append((sum(x[1][0] for x in item),
1657 1659 sum(x[1][1] for x in item),
1658 1660 sum(x[1][2] for x in item),)
1659 1661 )
1660 1662 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1661 1663 displayall=displayall)
1662 1664 fm.end()
1663 1665
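# minimal stand-in for a transaction: addrawrevision() only calls add() to
# journal the revlog files, and journaling is irrelevant for this benchmark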
1664 1666 class _faketr(object):
1665 1667 def add(s, x, y, z=None):
1666 1668 return None
1667 1669
1668 1670 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None):
1669 1671 timings = []
1670 1672 tr = _faketr()
1671 1673 with _temprevlog(ui, orig, startrev) as dest:
1672 1674 revs = list(orig.revs(startrev, stoprev))
1673 1675 total = len(revs)
1674 1676 topic = 'adding'
1675 1677 if runidx is not None:
1676 1678 topic += ' (run #%d)' % runidx
1677 1679 for idx, rev in enumerate(revs):
1678 1680 ui.progress(topic, idx, unit='revs', total=total)
1679 1681 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
1680 1682 with timeone() as r:
1681 1683 dest.addrawrevision(*addargs, **addkwargs)
1682 1684 timings.append((rev, r[0]))
1683 1685 ui.progress(topic, total, unit='revs', total=total)
1684 1686 ui.progress(topic, None, unit='revs', total=total)
1685 1687 return timings
1686 1688
1687 1689 def _getrevisionseed(orig, rev, tr, source):
1690 from mercurial.node import nullid
1691
1688 1692 linkrev = orig.linkrev(rev)
1689 1693 node = orig.node(rev)
1690 1694 p1, p2 = orig.parents(node)
1691 1695 flags = orig.flags(rev)
1692 1696 cachedelta = None
1693 1697 text = None
1694 1698
1695 1699 if source == b'full':
1696 1700 text = orig.revision(rev)
1697 1701 elif source == b'parent-1':
1698 1702 baserev = orig.rev(p1)
1699 1703 cachedelta = (baserev, orig.revdiff(p1, rev))
1704 elif source == b'parent-2':
1705 parent = p2
1706 if p2 == nullid:
1707 parent = p1
1708 baserev = orig.rev(parent)
1709 cachedelta = (baserev, orig.revdiff(parent, rev))
1700 1710
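    # `cachedelta` hands addrawrevision() a (base rev, delta) candidate, so the
    # write path is fed exactly the kind of data the chosen source describes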
1701 1711 return ((text, tr, linkrev, p1, p2),
1702 1712 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1703 1713
1704 1714 @contextlib.contextmanager
1705 1715 def _temprevlog(ui, orig, truncaterev):
1706 1716 from mercurial import vfs as vfsmod
1707 1717
1708 1718 if orig._inline:
1709 1719 raise error.Abort('not supporting inline revlog (yet)')
1710 1720
1711 1721 origindexpath = orig.opener.join(orig.indexfile)
1712 1722 origdatapath = orig.opener.join(orig.datafile)
1713 1723 indexname = 'revlog.i'
1714 1724 dataname = 'revlog.d'
1715 1725
1716 1726 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
1717 1727 try:
1718 1728 # copy the data file in a temporary directory
1719 1729 ui.debug('copying data in %s\n' % tmpdir)
1720 1730 destindexpath = os.path.join(tmpdir, 'revlog.i')
1721 1731 destdatapath = os.path.join(tmpdir, 'revlog.d')
1722 1732 shutil.copyfile(origindexpath, destindexpath)
1723 1733 shutil.copyfile(origdatapath, destdatapath)
1724 1734
1725 1735 # remove the data we want to add again
1726 1736 ui.debug('truncating data to be rewritten\n')
1727 1737 with open(destindexpath, 'ab') as index:
1728 1738 index.seek(0)
1729 1739 index.truncate(truncaterev * orig._io.size)
1730 1740 with open(destdatapath, 'ab') as data:
1731 1741 data.seek(0)
1732 1742 data.truncate(orig.start(truncaterev))
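        # index entries are fixed-size records (orig._io.size bytes each) and
        # orig.start(truncaterev) is that revision's offset in the data file,
        # so both truncations drop every revision >= truncaterev from the copy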
1733 1743
1734 1744 # instantiate a new revlog from the temporary copy
1735 1745         ui.debug('instantiating a new revlog from the truncated copy\n')
1736 1746 vfs = vfsmod.vfs(tmpdir)
1737 1747 vfs.options = getattr(orig.opener, 'options', None)
1738 1748
1739 1749 dest = revlog.revlog(vfs,
1740 1750 indexfile=indexname,
1741 1751 datafile=dataname)
1742 1752 if dest._inline:
1743 1753 raise error.Abort('not supporting inline revlog (yet)')
1744 1754 # make sure internals are initialized
1745 1755 dest.revision(len(dest) - 1)
1746 1756 yield dest
1747 1757 del dest, vfs
1748 1758 finally:
1749 1759 shutil.rmtree(tmpdir, True)
1750 1760
1751 1761 @command(b'perfrevlogchunks', revlogopts + formatteropts +
1752 1762 [(b'e', b'engines', b'', b'compression engines to use'),
1753 1763 (b's', b'startrev', 0, b'revision to start at')],
1754 1764 b'-c|-m|FILE')
1755 1765 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
1756 1766 """Benchmark operations on revlog chunks.
1757 1767
1758 1768 Logically, each revlog is a collection of fulltext revisions. However,
1759 1769 stored within each revlog are "chunks" of possibly compressed data. This
1760 1770 data needs to be read and decompressed or compressed and written.
1761 1771
1762 1772 This command measures the time it takes to read+decompress and recompress
1763 1773 chunks in a revlog. It effectively isolates I/O and compression performance.
1764 1774 For measurements of higher-level operations like resolving revisions,
1765 1775 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
1766 1776 """
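    # Example: `hg perfrevlogchunks -c --engines zlib` compares raw chunk reads
    # with recompression through the zlib engine on the changelog; other engine
    # names can be listed if they are available in the build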
1767 1777 opts = _byteskwargs(opts)
1768 1778
1769 1779 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
1770 1780
1771 1781 # _chunkraw was renamed to _getsegmentforrevs.
1772 1782 try:
1773 1783 segmentforrevs = rl._getsegmentforrevs
1774 1784 except AttributeError:
1775 1785 segmentforrevs = rl._chunkraw
1776 1786
1777 1787 # Verify engines argument.
1778 1788 if engines:
1779 1789 engines = set(e.strip() for e in engines.split(b','))
1780 1790 for engine in engines:
1781 1791 try:
1782 1792 util.compressionengines[engine]
1783 1793 except KeyError:
1784 1794 raise error.Abort(b'unknown compression engine: %s' % engine)
1785 1795 else:
1786 1796 engines = []
1787 1797 for e in util.compengines:
1788 1798 engine = util.compengines[e]
1789 1799 try:
1790 1800 if engine.available():
1791 1801 engine.revlogcompressor().compress(b'dummy')
1792 1802 engines.append(e)
1793 1803 except NotImplementedError:
1794 1804 pass
1795 1805
1796 1806 revs = list(rl.revs(startrev, len(rl) - 1))
1797 1807
1798 1808 def rlfh(rl):
1799 1809 if rl._inline:
1800 1810 return getsvfs(repo)(rl.indexfile)
1801 1811 else:
1802 1812 return getsvfs(repo)(rl.datafile)
1803 1813
1804 1814 def doread():
1805 1815 rl.clearcaches()
1806 1816 for rev in revs:
1807 1817 segmentforrevs(rev, rev)
1808 1818
1809 1819 def doreadcachedfh():
1810 1820 rl.clearcaches()
1811 1821 fh = rlfh(rl)
1812 1822 for rev in revs:
1813 1823 segmentforrevs(rev, rev, df=fh)
1814 1824
1815 1825 def doreadbatch():
1816 1826 rl.clearcaches()
1817 1827 segmentforrevs(revs[0], revs[-1])
1818 1828
1819 1829 def doreadbatchcachedfh():
1820 1830 rl.clearcaches()
1821 1831 fh = rlfh(rl)
1822 1832 segmentforrevs(revs[0], revs[-1], df=fh)
1823 1833
1824 1834 def dochunk():
1825 1835 rl.clearcaches()
1826 1836 fh = rlfh(rl)
1827 1837 for rev in revs:
1828 1838 rl._chunk(rev, df=fh)
1829 1839
1830 1840 chunks = [None]
1831 1841
1832 1842 def dochunkbatch():
1833 1843 rl.clearcaches()
1834 1844 fh = rlfh(rl)
1835 1845 # Save chunks as a side-effect.
1836 1846 chunks[0] = rl._chunks(revs, df=fh)
1837 1847
1838 1848 def docompress(compressor):
1839 1849 rl.clearcaches()
1840 1850
1841 1851 try:
1842 1852 # Swap in the requested compression engine.
1843 1853 oldcompressor = rl._compressor
1844 1854 rl._compressor = compressor
1845 1855 for chunk in chunks[0]:
1846 1856 rl.compress(chunk)
1847 1857 finally:
1848 1858 rl._compressor = oldcompressor
1849 1859
1850 1860 benches = [
1851 1861 (lambda: doread(), b'read'),
1852 1862 (lambda: doreadcachedfh(), b'read w/ reused fd'),
1853 1863 (lambda: doreadbatch(), b'read batch'),
1854 1864 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
1855 1865 (lambda: dochunk(), b'chunk'),
1856 1866 (lambda: dochunkbatch(), b'chunk batch'),
1857 1867 ]
1858 1868
1859 1869 for engine in sorted(engines):
1860 1870 compressor = util.compengines[engine].revlogcompressor()
1861 1871 benches.append((functools.partial(docompress, compressor),
1862 1872 b'compress w/ %s' % engine))
1863 1873
1864 1874 for fn, title in benches:
1865 1875 timer, fm = gettimer(ui, opts)
1866 1876 timer(fn, title=title)
1867 1877 fm.end()
1868 1878
1869 1879 @command(b'perfrevlogrevision', revlogopts + formatteropts +
1870 1880 [(b'', b'cache', False, b'use caches instead of clearing')],
1871 1881 b'-c|-m|FILE REV')
1872 1882 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
1873 1883 """Benchmark obtaining a revlog revision.
1874 1884
1875 1885 Obtaining a revlog revision consists of roughly the following steps:
1876 1886
1877 1887 1. Compute the delta chain
1878 1888 2. Slice the delta chain if applicable
1879 1889 3. Obtain the raw chunks for that delta chain
1880 1890 4. Decompress each raw chunk
1881 1891 5. Apply binary patches to obtain fulltext
1882 1892 6. Verify hash of fulltext
1883 1893
1884 1894 This command measures the time spent in each of these phases.
1885 1895 """
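    # Example: `hg perfrevlogrevision -c 1000` reports per-phase timings for
    # changelog revision 1000, assuming the repository has that many revisions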
1886 1896 opts = _byteskwargs(opts)
1887 1897
1888 1898 if opts.get(b'changelog') or opts.get(b'manifest'):
1889 1899 file_, rev = None, file_
1890 1900 elif rev is None:
1891 1901 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
1892 1902
1893 1903 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
1894 1904
1895 1905 # _chunkraw was renamed to _getsegmentforrevs.
1896 1906 try:
1897 1907 segmentforrevs = r._getsegmentforrevs
1898 1908 except AttributeError:
1899 1909 segmentforrevs = r._chunkraw
1900 1910
1901 1911 node = r.lookup(rev)
1902 1912 rev = r.rev(node)
1903 1913
1904 1914 def getrawchunks(data, chain):
1905 1915 start = r.start
1906 1916 length = r.length
1907 1917 inline = r._inline
1908 1918 iosize = r._io.size
1909 1919 buffer = util.buffer
1910 1920
1911 1921 chunks = []
1912 1922 ladd = chunks.append
1913 1923 for idx, item in enumerate(chain):
1914 1924 offset = start(item[0])
1915 1925 bits = data[idx]
1916 1926 for rev in item:
1917 1927 chunkstart = start(rev)
1918 1928 if inline:
1919 1929 chunkstart += (rev + 1) * iosize
1920 1930 chunklength = length(rev)
1921 1931 ladd(buffer(bits, chunkstart - offset, chunklength))
1922 1932
1923 1933 return chunks
1924 1934
1925 1935 def dodeltachain(rev):
1926 1936 if not cache:
1927 1937 r.clearcaches()
1928 1938 r._deltachain(rev)
1929 1939
1930 1940 def doread(chain):
1931 1941 if not cache:
1932 1942 r.clearcaches()
1933 1943 for item in slicedchain:
1934 1944 segmentforrevs(item[0], item[-1])
1935 1945
1936 1946 def doslice(r, chain, size):
1937 1947 for s in slicechunk(r, chain, targetsize=size):
1938 1948 pass
1939 1949
1940 1950 def dorawchunks(data, chain):
1941 1951 if not cache:
1942 1952 r.clearcaches()
1943 1953 getrawchunks(data, chain)
1944 1954
1945 1955 def dodecompress(chunks):
1946 1956 decomp = r.decompress
1947 1957 for chunk in chunks:
1948 1958 decomp(chunk)
1949 1959
1950 1960 def dopatch(text, bins):
1951 1961 if not cache:
1952 1962 r.clearcaches()
1953 1963 mdiff.patches(text, bins)
1954 1964
1955 1965 def dohash(text):
1956 1966 if not cache:
1957 1967 r.clearcaches()
1958 1968 r.checkhash(text, node, rev=rev)
1959 1969
1960 1970 def dorevision():
1961 1971 if not cache:
1962 1972 r.clearcaches()
1963 1973 r.revision(node)
1964 1974
1965 1975 try:
1966 1976 from mercurial.revlogutils.deltas import slicechunk
1967 1977 except ImportError:
1968 1978 slicechunk = getattr(revlog, '_slicechunk', None)
1969 1979
1970 1980 size = r.length(rev)
1971 1981 chain = r._deltachain(rev)[0]
1972 1982 if not getattr(r, '_withsparseread', False):
1973 1983 slicedchain = (chain,)
1974 1984 else:
1975 1985 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
1976 1986 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
1977 1987 rawchunks = getrawchunks(data, slicedchain)
1978 1988 bins = r._chunks(chain)
1979 1989 text = bytes(bins[0])
1980 1990 bins = bins[1:]
1981 1991 text = mdiff.patches(text, bins)
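    # everything above (data, rawchunks, bins, text) is precomputed once so that
    # each benchmark below isolates a single phase of revision resolution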
1982 1992
1983 1993 benches = [
1984 1994 (lambda: dorevision(), b'full'),
1985 1995 (lambda: dodeltachain(rev), b'deltachain'),
1986 1996 (lambda: doread(chain), b'read'),
1987 1997 ]
1988 1998
1989 1999 if getattr(r, '_withsparseread', False):
1990 2000 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
1991 2001 benches.append(slicing)
1992 2002
1993 2003 benches.extend([
1994 2004 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
1995 2005 (lambda: dodecompress(rawchunks), b'decompress'),
1996 2006 (lambda: dopatch(text, bins), b'patch'),
1997 2007 (lambda: dohash(text), b'hash'),
1998 2008 ])
1999 2009
2000 2010 timer, fm = gettimer(ui, opts)
2001 2011 for fn, title in benches:
2002 2012 timer(fn, title=title)
2003 2013 fm.end()
2004 2014
2005 2015 @command(b'perfrevset',
2006 2016 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2007 2017 (b'', b'contexts', False, b'obtain changectx for each revision')]
2008 2018 + formatteropts, b"REVSET")
2009 2019 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2010 2020 """benchmark the execution time of a revset
2011 2021
2012 2022     Use the --clear option if you need to evaluate the impact of rebuilding the
2013 2023     volatile revision set caches on revset execution. The volatile caches hold
2014 2024     filtering- and obsolescence-related data."""
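    # Example: `hg perfrevset "draft()" --contexts` times revset evaluation plus
    # changectx construction for every matched revision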
2015 2025 opts = _byteskwargs(opts)
2016 2026
2017 2027 timer, fm = gettimer(ui, opts)
2018 2028 def d():
2019 2029 if clear:
2020 2030 repo.invalidatevolatilesets()
2021 2031 if contexts:
2022 2032 for ctx in repo.set(expr): pass
2023 2033 else:
2024 2034 for r in repo.revs(expr): pass
2025 2035 timer(d)
2026 2036 fm.end()
2027 2037
2028 2038 @command(b'perfvolatilesets',
2029 2039 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2030 2040 ] + formatteropts)
2031 2041 def perfvolatilesets(ui, repo, *names, **opts):
2032 2042     """benchmark the computation of various volatile sets
2033 2043
2034 2044     Volatile sets compute elements related to filtering and obsolescence."""
2035 2045 opts = _byteskwargs(opts)
2036 2046 timer, fm = gettimer(ui, opts)
2037 2047 repo = repo.unfiltered()
2038 2048
2039 2049 def getobs(name):
2040 2050 def d():
2041 2051 repo.invalidatevolatilesets()
2042 2052 if opts[b'clear_obsstore']:
2043 2053 clearfilecache(repo, b'obsstore')
2044 2054 obsolete.getrevs(repo, name)
2045 2055 return d
2046 2056
2047 2057 allobs = sorted(obsolete.cachefuncs)
2048 2058 if names:
2049 2059 allobs = [n for n in allobs if n in names]
2050 2060
2051 2061 for name in allobs:
2052 2062 timer(getobs(name), title=name)
2053 2063
2054 2064 def getfiltered(name):
2055 2065 def d():
2056 2066 repo.invalidatevolatilesets()
2057 2067 if opts[b'clear_obsstore']:
2058 2068 clearfilecache(repo, b'obsstore')
2059 2069 repoview.filterrevs(repo, name)
2060 2070 return d
2061 2071
2062 2072 allfilter = sorted(repoview.filtertable)
2063 2073 if names:
2064 2074 allfilter = [n for n in allfilter if n in names]
2065 2075
2066 2076 for name in allfilter:
2067 2077 timer(getfiltered(name), title=name)
2068 2078 fm.end()
2069 2079
2070 2080 @command(b'perfbranchmap',
2071 2081 [(b'f', b'full', False,
2072 2082 b'Includes build time of subset'),
2073 2083 (b'', b'clear-revbranch', False,
2074 2084 b'purge the revbranch cache between computation'),
2075 2085 ] + formatteropts)
2076 2086 def perfbranchmap(ui, repo, *filternames, **opts):
2077 2087 """benchmark the update of a branchmap
2078 2088
2079 2089 This benchmarks the full repo.branchmap() call with read and write disabled
2080 2090 """
2081 2091 opts = _byteskwargs(opts)
2082 2092 full = opts.get(b"full", False)
2083 2093 clear_revbranch = opts.get(b"clear_revbranch", False)
2084 2094 timer, fm = gettimer(ui, opts)
2085 2095 def getbranchmap(filtername):
2086 2096 """generate a benchmark function for the filtername"""
2087 2097 if filtername is None:
2088 2098 view = repo
2089 2099 else:
2090 2100 view = repo.filtered(filtername)
2091 2101 def d():
2092 2102 if clear_revbranch:
2093 2103 repo.revbranchcache()._clear()
2094 2104 if full:
2095 2105 view._branchcaches.clear()
2096 2106 else:
2097 2107 view._branchcaches.pop(filtername, None)
2098 2108 view.branchmap()
2099 2109 return d
2100 2110 # add filter in smaller subset to bigger subset
2101 2111 possiblefilters = set(repoview.filtertable)
2102 2112 if filternames:
2103 2113 possiblefilters &= set(filternames)
2104 2114 subsettable = getbranchmapsubsettable()
2105 2115 allfilters = []
2106 2116 while possiblefilters:
2107 2117 for name in possiblefilters:
2108 2118 subset = subsettable.get(name)
2109 2119 if subset not in possiblefilters:
2110 2120 break
2111 2121 else:
2112 2122 assert False, b'subset cycle %s!' % possiblefilters
2113 2123 allfilters.append(name)
2114 2124 possiblefilters.remove(name)
2115 2125
2116 2126 # warm the cache
2117 2127 if not full:
2118 2128 for name in allfilters:
2119 2129 repo.filtered(name).branchmap()
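    # with the caches warm, the timed runs below (without --full) measure only
    # the incremental branchmap update from the nearest cached subset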
2120 2130 if not filternames or b'unfiltered' in filternames:
2121 2131 # add unfiltered
2122 2132 allfilters.append(None)
2123 2133
2124 2134 branchcacheread = safeattrsetter(branchmap, b'read')
2125 2135 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2126 2136 branchcacheread.set(lambda repo: None)
2127 2137 branchcachewrite.set(lambda bc, repo: None)
2128 2138 try:
2129 2139 for name in allfilters:
2130 2140 printname = name
2131 2141 if name is None:
2132 2142 printname = b'unfiltered'
2133 2143 timer(getbranchmap(name), title=str(printname))
2134 2144 finally:
2135 2145 branchcacheread.restore()
2136 2146 branchcachewrite.restore()
2137 2147 fm.end()
2138 2148
2139 2149 @command(b'perfbranchmapload', [
2140 2150 (b'f', b'filter', b'', b'Specify repoview filter'),
2141 2151      (b'', b'list', False, b'List branchmap filter caches'),
2142 2152 ] + formatteropts)
2143 2153 def perfbranchmapread(ui, repo, filter=b'', list=False, **opts):
2144 2154 """benchmark reading the branchmap"""
2145 2155 opts = _byteskwargs(opts)
2146 2156
2147 2157 if list:
2148 2158 for name, kind, st in repo.cachevfs.readdir(stat=True):
2149 2159 if name.startswith(b'branch2'):
2150 2160 filtername = name.partition(b'-')[2] or b'unfiltered'
2151 2161 ui.status(b'%s - %s\n'
2152 2162 % (filtername, util.bytecount(st.st_size)))
2153 2163 return
2154 2164 if filter:
2155 2165 repo = repoview.repoview(repo, filter)
2156 2166 else:
2157 2167 repo = repo.unfiltered()
2158 2168     # try once without the timer; the filter may not be cached
2159 2169 if branchmap.read(repo) is None:
2160 2170         raise error.Abort(b'No branchmap cached for %s repo'
2161 2171 % (filter or b'unfiltered'))
2162 2172 timer, fm = gettimer(ui, opts)
2163 2173 timer(lambda: branchmap.read(repo) and None)
2164 2174 fm.end()
2165 2175
2166 2176 @command(b'perfloadmarkers')
2167 2177 def perfloadmarkers(ui, repo):
2168 2178 """benchmark the time to parse the on-disk markers for a repo
2169 2179
2170 2180 Result is the number of markers in the repo."""
2171 2181 timer, fm = gettimer(ui)
2172 2182 svfs = getsvfs(repo)
2173 2183 timer(lambda: len(obsolete.obsstore(svfs)))
2174 2184 fm.end()
2175 2185
2176 2186 @command(b'perflrucachedict', formatteropts +
2177 2187 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2178 2188 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2179 2189 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2180 2190 (b'', b'size', 4, b'size of cache'),
2181 2191 (b'', b'gets', 10000, b'number of key lookups'),
2182 2192 (b'', b'sets', 10000, b'number of key sets'),
2183 2193 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2184 2194 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2185 2195 norepo=True)
2186 2196 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2187 2197 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2188 2198 opts = _byteskwargs(opts)
2189 2199
2190 2200 def doinit():
2191 2201 for i in _xrange(10000):
2192 2202 util.lrucachedict(size)
2193 2203
2194 2204 costrange = list(range(mincost, maxcost + 1))
2195 2205
2196 2206 values = []
2197 2207 for i in _xrange(size):
2198 2208 values.append(random.randint(0, _maxint))
2199 2209
2200 2210 # Get mode fills the cache and tests raw lookup performance with no
2201 2211 # eviction.
2202 2212 getseq = []
2203 2213 for i in _xrange(gets):
2204 2214 getseq.append(random.choice(values))
2205 2215
2206 2216 def dogets():
2207 2217 d = util.lrucachedict(size)
2208 2218 for v in values:
2209 2219 d[v] = v
2210 2220 for key in getseq:
2211 2221 value = d[key]
2212 2222 value # silence pyflakes warning
2213 2223
2214 2224 def dogetscost():
2215 2225 d = util.lrucachedict(size, maxcost=costlimit)
2216 2226 for i, v in enumerate(values):
2217 2227 d.insert(v, v, cost=costs[i])
2218 2228 for key in getseq:
2219 2229 try:
2220 2230 value = d[key]
2221 2231 value # silence pyflakes warning
2222 2232 except KeyError:
2223 2233 pass
2224 2234
2225 2235 # Set mode tests insertion speed with cache eviction.
2226 2236 setseq = []
2227 2237 costs = []
2228 2238 for i in _xrange(sets):
2229 2239 setseq.append(random.randint(0, _maxint))
2230 2240 costs.append(random.choice(costrange))
2231 2241
2232 2242 def doinserts():
2233 2243 d = util.lrucachedict(size)
2234 2244 for v in setseq:
2235 2245 d.insert(v, v)
2236 2246
2237 2247 def doinsertscost():
2238 2248 d = util.lrucachedict(size, maxcost=costlimit)
2239 2249 for i, v in enumerate(setseq):
2240 2250 d.insert(v, v, cost=costs[i])
2241 2251
2242 2252 def dosets():
2243 2253 d = util.lrucachedict(size)
2244 2254 for v in setseq:
2245 2255 d[v] = v
2246 2256
2247 2257 # Mixed mode randomly performs gets and sets with eviction.
2248 2258 mixedops = []
2249 2259 for i in _xrange(mixed):
2250 2260 r = random.randint(0, 100)
2251 2261 if r < mixedgetfreq:
2252 2262 op = 0
2253 2263 else:
2254 2264 op = 1
2255 2265
2256 2266 mixedops.append((op,
2257 2267 random.randint(0, size * 2),
2258 2268 random.choice(costrange)))
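    # each mixed op is a (get-or-set flag, key, cost) tuple; keys span twice the
    # cache size, so a fair share of the gets miss and the sets cause evictions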
2259 2269
2260 2270 def domixed():
2261 2271 d = util.lrucachedict(size)
2262 2272
2263 2273 for op, v, cost in mixedops:
2264 2274 if op == 0:
2265 2275 try:
2266 2276 d[v]
2267 2277 except KeyError:
2268 2278 pass
2269 2279 else:
2270 2280 d[v] = v
2271 2281
2272 2282 def domixedcost():
2273 2283 d = util.lrucachedict(size, maxcost=costlimit)
2274 2284
2275 2285 for op, v, cost in mixedops:
2276 2286 if op == 0:
2277 2287 try:
2278 2288 d[v]
2279 2289 except KeyError:
2280 2290 pass
2281 2291 else:
2282 2292 d.insert(v, v, cost=cost)
2283 2293
2284 2294 benches = [
2285 2295 (doinit, b'init'),
2286 2296 ]
2287 2297
2288 2298 if costlimit:
2289 2299 benches.extend([
2290 2300 (dogetscost, b'gets w/ cost limit'),
2291 2301 (doinsertscost, b'inserts w/ cost limit'),
2292 2302 (domixedcost, b'mixed w/ cost limit'),
2293 2303 ])
2294 2304 else:
2295 2305 benches.extend([
2296 2306 (dogets, b'gets'),
2297 2307 (doinserts, b'inserts'),
2298 2308 (dosets, b'sets'),
2299 2309 (domixed, b'mixed')
2300 2310 ])
2301 2311
2302 2312 for fn, title in benches:
2303 2313 timer, fm = gettimer(ui, opts)
2304 2314 timer(fn, title=title)
2305 2315 fm.end()
2306 2316
2307 2317 @command(b'perfwrite', formatteropts)
2308 2318 def perfwrite(ui, repo, **opts):
2309 2319 """microbenchmark ui.write
2310 2320 """
2311 2321 opts = _byteskwargs(opts)
2312 2322
2313 2323 timer, fm = gettimer(ui, opts)
2314 2324 def write():
2315 2325 for i in range(100000):
2316 2326 ui.write((b'Testing write performance\n'))
2317 2327 timer(write)
2318 2328 fm.end()
2319 2329
2320 2330 def uisetup(ui):
2321 2331 if (util.safehasattr(cmdutil, b'openrevlog') and
2322 2332 not util.safehasattr(commands, b'debugrevlogopts')):
2323 2333 # for "historical portability":
2324 2334 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2325 2335 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2326 2336 # openrevlog() should cause failure, because it has been
2327 2337 # available since 3.5 (or 49c583ca48c4).
2328 2338 def openrevlog(orig, repo, cmd, file_, opts):
2329 2339 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2330 2340 raise error.Abort(b"This version doesn't support --dir option",
2331 2341 hint=b"use 3.5 or later")
2332 2342 return orig(repo, cmd, file_, opts)
2333 2343 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)