##// END OF EJS Templates
perf: pass limits as a function argument...
marmoute -
r42185:0e642294 default
parent child Browse files
Show More
@@ -1,2816 +1,2817 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistic will be reported for each benchmark: best,
11 When set, additional statistic will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of second to wait before any group of run (default: 1)
16 number of second to wait before any group of run (default: 1)
17
17
18 ``stub``
18 ``stub``
19 When set, benchmark will only be run once, useful for testing (default: off)
19 When set, benchmark will only be run once, useful for testing (default: off)
20 '''
20 '''
21
21
22 # "historical portability" policy of perf.py:
22 # "historical portability" policy of perf.py:
23 #
23 #
24 # We have to do:
24 # We have to do:
25 # - make perf.py "loadable" with as wide Mercurial version as possible
25 # - make perf.py "loadable" with as wide Mercurial version as possible
26 # This doesn't mean that perf commands work correctly with that Mercurial.
26 # This doesn't mean that perf commands work correctly with that Mercurial.
27 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
27 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
28 # - make historical perf command work correctly with as wide Mercurial
28 # - make historical perf command work correctly with as wide Mercurial
29 # version as possible
29 # version as possible
30 #
30 #
31 # We have to do, if possible with reasonable cost:
31 # We have to do, if possible with reasonable cost:
32 # - make recent perf command for historical feature work correctly
32 # - make recent perf command for historical feature work correctly
33 # with early Mercurial
33 # with early Mercurial
34 #
34 #
35 # We don't have to do:
35 # We don't have to do:
36 # - make perf command for recent feature work correctly with early
36 # - make perf command for recent feature work correctly with early
37 # Mercurial
37 # Mercurial
38
38
39 from __future__ import absolute_import
39 from __future__ import absolute_import
40 import contextlib
40 import contextlib
41 import functools
41 import functools
42 import gc
42 import gc
43 import os
43 import os
44 import random
44 import random
45 import shutil
45 import shutil
46 import struct
46 import struct
47 import sys
47 import sys
48 import tempfile
48 import tempfile
49 import threading
49 import threading
50 import time
50 import time
51 from mercurial import (
51 from mercurial import (
52 changegroup,
52 changegroup,
53 cmdutil,
53 cmdutil,
54 commands,
54 commands,
55 copies,
55 copies,
56 error,
56 error,
57 extensions,
57 extensions,
58 hg,
58 hg,
59 mdiff,
59 mdiff,
60 merge,
60 merge,
61 revlog,
61 revlog,
62 util,
62 util,
63 )
63 )
64
64
65 # for "historical portability":
65 # for "historical portability":
66 # try to import modules separately (in dict order), and ignore
66 # try to import modules separately (in dict order), and ignore
67 # failure, because these aren't available with early Mercurial
67 # failure, because these aren't available with early Mercurial
68 try:
68 try:
69 from mercurial import branchmap # since 2.5 (or bcee63733aad)
69 from mercurial import branchmap # since 2.5 (or bcee63733aad)
70 except ImportError:
70 except ImportError:
71 pass
71 pass
72 try:
72 try:
73 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
73 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
74 except ImportError:
74 except ImportError:
75 pass
75 pass
76 try:
76 try:
77 from mercurial import registrar # since 3.7 (or 37d50250b696)
77 from mercurial import registrar # since 3.7 (or 37d50250b696)
78 dir(registrar) # forcibly load it
78 dir(registrar) # forcibly load it
79 except ImportError:
79 except ImportError:
80 registrar = None
80 registrar = None
81 try:
81 try:
82 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
82 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
83 except ImportError:
83 except ImportError:
84 pass
84 pass
85 try:
85 try:
86 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
86 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
87 except ImportError:
87 except ImportError:
88 pass
88 pass
89 try:
89 try:
90 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
90 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
91 except ImportError:
91 except ImportError:
92 pass
92 pass
93
93
94
94
def identity(a):
    """Return *a* unchanged (no-op stand-in for missing pycompat helpers)."""
    return a
97
97
# for "historical portability":
# bind pycompat helpers when available, falling back to py2-only
# equivalents on Mercurial versions that predate them.
try:
    from mercurial import pycompat
    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (ImportError, AttributeError):
    # fallback path only reachable on old (py2-only) Mercurial
    import inspect
    getargspec = inspect.getargspec
    _byteskwargs = identity
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange

try:
    # 4.7+
    # NOTE(review): if the pycompat import above failed, this raises an
    # uncaught NameError rather than falling through -- pre-existing
    # behavior, only reachable on very old Mercurial; confirm before changing.
    queue = pycompat.queue.Queue
except (AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (AttributeError, ImportError):
        queue = util.queue
127
127
# for "historical portability":
# maketemplater moved from cmdutil to logcmdutil; probe both homes and
# fall back to None when neither exists.
try:
    from mercurial import logcmdutil
    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None
136
136
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # sentinel distinguishable from any real attribute value
def safehasattr(thing, attr):
    """Return True if *thing* has attribute *attr* (bytes name)."""
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)
144
144
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == 'nt':
    # fix: os.name is a native str; the previous comparison against b'nt'
    # could never match on Python 3.
    util.timer = time.clock
else:
    util.timer = time.time
154
154
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
                         (b'c', b'changelog', False, (b'open changelog')),
                         (b'm', b'manifest', False, (b'open manifest')),
                         (b'', b'dir', False, (b'open directory manifest')),
                     ]))

# table the @command decorator below registers perf commands into
cmdtable = {}
176
176
177 # for "historical portability":
177 # for "historical portability":
178 # define parsealiases locally, because cmdutil.parsealiases has been
178 # define parsealiases locally, because cmdutil.parsealiases has been
179 # available since 1.5 (or 6252852b4332)
179 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a b'name|alias1|alias2' command spec into its alias list.

    Defined locally because cmdutil.parsealiases has only been
    available since 1.5 (or 6252852b4332).
    """
    return cmd.split(b"|")
182
182
# pick the best available @command decorator for this Mercurial version
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
210
210
# declare the perf.* config knobs when the registrar supports it;
# silently skip on Mercurial versions without configitems.
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    pass
230
230
def getlen(ui):
    """Return a length function.

    When the experimental perf.stub config is set, return a function
    that always reports 1 so stubbed (single-run) benchmarks stay cheap;
    otherwise return the builtin len.
    """
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len
235
235
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)
    return functools.partial(_timer, fm, displayall=displayall), fm
301
301
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* exactly once (after optional *setup*), without timing.

    Drop-in replacement for _timer used when perf.stub is set; fm and
    title are accepted for signature compatibility but unused.
    """
    if setup is not None:
        setup()
    func()
306
306
@contextlib.contextmanager
def timeone():
    """Context manager yielding a list that receives one timing tuple.

    On exit the list holds (wall, user, sys) deltas for the managed block,
    measured with util.timer and os.times.
    """
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
317
317
318
318
# list of stop condition (elapsed time, minimal run count); a benchmark
# loop stops once ANY pair is satisfied, so short runs repeat at least
# 100 times and slow runs still get 3 samples.
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
324
324
def _timer(fm, func, setup=None, title=None, displayall=False,
           limits=DEFAULTLIMITS):
    """Benchmark *func* and report results through formatter *fm*.

    *setup* (if given) runs before every iteration, outside the timed
    region. Iteration stops once any (elapsed, mincount) pair in
    *limits* is satisfied; *limits* defaults to DEFAULTLIMITS so callers
    may pass tighter or looser stop conditions.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with timeone() as item:
            r = func()
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)
348
349
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timings to formatter *fm*.

    *timings* is a list of (wall, user, sys) tuples; it is sorted in
    place. The best entry is always shown; with *displayall*, max,
    average and median rows are added.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)
    def display(role, entry):
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')
    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
380
381
381 # utilities for historical portability
382 # utilities for historical portability
382
383
def getint(ui, section, name, default):
    """Read an integer config value, returning *default* when unset.

    Raises error.ConfigError when the value is set but not an integer.
    """
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, v))
394
395
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        # set/restore close over obj, name and the captured original value
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
424
425
425 # utilities to examine each internal API changes
426 # utilities to examine each internal API changes
426
427
def getbranchmapsubsettable():
    """Return the subsettable mapping from whichever module defines it."""
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    for mod in (branchmap, repoview):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
442
443
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        # older Mercurial exposed the store opener as repo.sopener
        return getattr(repo, 'sopener')
453
454
def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        # older Mercurial exposed the working vfs as repo.opener
        return getattr(repo, 'opener')
464
465
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
493
494
494 # utilities to clear cache
495 # utilities to clear cache
495
496
def clearfilecache(obj, attrname):
    """Drop *attrname* from obj's instance dict and its _filecache entry.

    Works on the unfiltered repo when obj supports unfiltered(), so the
    cached value is really recomputed on next access.
    """
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
503
504
504 def clearchangelog(repo):
505 def clearchangelog(repo):
505 if repo is not repo.unfiltered():
506 if repo is not repo.unfiltered():
506 object.__setattr__(repo, r'_clcachekey', None)
507 object.__setattr__(repo, r'_clcachekey', None)
507 object.__setattr__(repo, r'_clcache', None)
508 object.__setattr__(repo, r'_clcache', None)
508 clearfilecache(repo.unfiltered(), 'changelog')
509 clearfilecache(repo.unfiltered(), 'changelog')
509
510
510 # perf commands
511 # perf commands
511
512
512 @command(b'perfwalk', formatteropts)
513 @command(b'perfwalk', formatteropts)
513 def perfwalk(ui, repo, *pats, **opts):
514 def perfwalk(ui, repo, *pats, **opts):
514 opts = _byteskwargs(opts)
515 opts = _byteskwargs(opts)
515 timer, fm = gettimer(ui, opts)
516 timer, fm = gettimer(ui, opts)
516 m = scmutil.match(repo[None], pats, {})
517 m = scmutil.match(repo[None], pats, {})
517 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
518 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
518 ignored=False))))
519 ignored=False))))
519 fm.end()
520 fm.end()
520
521
521 @command(b'perfannotate', formatteropts)
522 @command(b'perfannotate', formatteropts)
522 def perfannotate(ui, repo, f, **opts):
523 def perfannotate(ui, repo, f, **opts):
523 opts = _byteskwargs(opts)
524 opts = _byteskwargs(opts)
524 timer, fm = gettimer(ui, opts)
525 timer, fm = gettimer(ui, opts)
525 fc = repo[b'.'][f]
526 fc = repo[b'.'][f]
526 timer(lambda: len(fc.annotate(True)))
527 timer(lambda: len(fc.annotate(True)))
527 fm.end()
528 fm.end()
528
529
529 @command(b'perfstatus',
530 @command(b'perfstatus',
530 [(b'u', b'unknown', False,
531 [(b'u', b'unknown', False,
531 b'ask status to look for unknown files')] + formatteropts)
532 b'ask status to look for unknown files')] + formatteropts)
532 def perfstatus(ui, repo, **opts):
533 def perfstatus(ui, repo, **opts):
533 opts = _byteskwargs(opts)
534 opts = _byteskwargs(opts)
534 #m = match.always(repo.root, repo.getcwd())
535 #m = match.always(repo.root, repo.getcwd())
535 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
536 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
536 # False))))
537 # False))))
537 timer, fm = gettimer(ui, opts)
538 timer, fm = gettimer(ui, opts)
538 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
539 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
539 fm.end()
540 fm.end()
540
541
541 @command(b'perfaddremove', formatteropts)
542 @command(b'perfaddremove', formatteropts)
542 def perfaddremove(ui, repo, **opts):
543 def perfaddremove(ui, repo, **opts):
543 opts = _byteskwargs(opts)
544 opts = _byteskwargs(opts)
544 timer, fm = gettimer(ui, opts)
545 timer, fm = gettimer(ui, opts)
545 try:
546 try:
546 oldquiet = repo.ui.quiet
547 oldquiet = repo.ui.quiet
547 repo.ui.quiet = True
548 repo.ui.quiet = True
548 matcher = scmutil.match(repo[None])
549 matcher = scmutil.match(repo[None])
549 opts[b'dry_run'] = True
550 opts[b'dry_run'] = True
550 if b'uipathfn' in getargspec(scmutil.addremove).args:
551 if b'uipathfn' in getargspec(scmutil.addremove).args:
551 uipathfn = scmutil.getuipathfn(repo)
552 uipathfn = scmutil.getuipathfn(repo)
552 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
553 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
553 else:
554 else:
554 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
555 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
555 finally:
556 finally:
556 repo.ui.quiet = oldquiet
557 repo.ui.quiet = oldquiet
557 fm.end()
558 fm.end()
558
559
559 def clearcaches(cl):
560 def clearcaches(cl):
560 # behave somewhat consistently across internal API changes
561 # behave somewhat consistently across internal API changes
561 if util.safehasattr(cl, b'clearcaches'):
562 if util.safehasattr(cl, b'clearcaches'):
562 cl.clearcaches()
563 cl.clearcaches()
563 elif util.safehasattr(cl, b'_nodecache'):
564 elif util.safehasattr(cl, b'_nodecache'):
564 from mercurial.node import nullid, nullrev
565 from mercurial.node import nullid, nullrev
565 cl._nodecache = {nullid: nullrev}
566 cl._nodecache = {nullid: nullrev}
566 cl._nodepos = None
567 cl._nodepos = None
567
568
568 @command(b'perfheads', formatteropts)
569 @command(b'perfheads', formatteropts)
569 def perfheads(ui, repo, **opts):
570 def perfheads(ui, repo, **opts):
570 """benchmark the computation of a changelog heads"""
571 """benchmark the computation of a changelog heads"""
571 opts = _byteskwargs(opts)
572 opts = _byteskwargs(opts)
572 timer, fm = gettimer(ui, opts)
573 timer, fm = gettimer(ui, opts)
573 cl = repo.changelog
574 cl = repo.changelog
574 def s():
575 def s():
575 clearcaches(cl)
576 clearcaches(cl)
576 def d():
577 def d():
577 len(cl.headrevs())
578 len(cl.headrevs())
578 timer(d, setup=s)
579 timer(d, setup=s)
579 fm.end()
580 fm.end()
580
581
581 @command(b'perftags', formatteropts+
582 @command(b'perftags', formatteropts+
582 [
583 [
583 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
584 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
584 ])
585 ])
585 def perftags(ui, repo, **opts):
586 def perftags(ui, repo, **opts):
586 opts = _byteskwargs(opts)
587 opts = _byteskwargs(opts)
587 timer, fm = gettimer(ui, opts)
588 timer, fm = gettimer(ui, opts)
588 repocleartagscache = repocleartagscachefunc(repo)
589 repocleartagscache = repocleartagscachefunc(repo)
589 clearrevlogs = opts[b'clear_revlogs']
590 clearrevlogs = opts[b'clear_revlogs']
590 def s():
591 def s():
591 if clearrevlogs:
592 if clearrevlogs:
592 clearchangelog(repo)
593 clearchangelog(repo)
593 clearfilecache(repo.unfiltered(), 'manifest')
594 clearfilecache(repo.unfiltered(), 'manifest')
594 repocleartagscache()
595 repocleartagscache()
595 def t():
596 def t():
596 return len(repo.tags())
597 return len(repo.tags())
597 timer(t, setup=s)
598 timer(t, setup=s)
598 fm.end()
599 fm.end()
599
600
600 @command(b'perfancestors', formatteropts)
601 @command(b'perfancestors', formatteropts)
601 def perfancestors(ui, repo, **opts):
602 def perfancestors(ui, repo, **opts):
602 opts = _byteskwargs(opts)
603 opts = _byteskwargs(opts)
603 timer, fm = gettimer(ui, opts)
604 timer, fm = gettimer(ui, opts)
604 heads = repo.changelog.headrevs()
605 heads = repo.changelog.headrevs()
605 def d():
606 def d():
606 for a in repo.changelog.ancestors(heads):
607 for a in repo.changelog.ancestors(heads):
607 pass
608 pass
608 timer(d)
609 timer(d)
609 fm.end()
610 fm.end()
610
611
611 @command(b'perfancestorset', formatteropts)
612 @command(b'perfancestorset', formatteropts)
612 def perfancestorset(ui, repo, revset, **opts):
613 def perfancestorset(ui, repo, revset, **opts):
613 opts = _byteskwargs(opts)
614 opts = _byteskwargs(opts)
614 timer, fm = gettimer(ui, opts)
615 timer, fm = gettimer(ui, opts)
615 revs = repo.revs(revset)
616 revs = repo.revs(revset)
616 heads = repo.changelog.headrevs()
617 heads = repo.changelog.headrevs()
617 def d():
618 def d():
618 s = repo.changelog.ancestors(heads)
619 s = repo.changelog.ancestors(heads)
619 for rev in revs:
620 for rev in revs:
620 rev in s
621 rev in s
621 timer(d)
622 timer(d)
622 fm.end()
623 fm.end()
623
624
624 @command(b'perfdiscovery', formatteropts, b'PATH')
625 @command(b'perfdiscovery', formatteropts, b'PATH')
625 def perfdiscovery(ui, repo, path, **opts):
626 def perfdiscovery(ui, repo, path, **opts):
626 """benchmark discovery between local repo and the peer at given path
627 """benchmark discovery between local repo and the peer at given path
627 """
628 """
628 repos = [repo, None]
629 repos = [repo, None]
629 timer, fm = gettimer(ui, opts)
630 timer, fm = gettimer(ui, opts)
630 path = ui.expandpath(path)
631 path = ui.expandpath(path)
631
632
632 def s():
633 def s():
633 repos[1] = hg.peer(ui, opts, path)
634 repos[1] = hg.peer(ui, opts, path)
634 def d():
635 def d():
635 setdiscovery.findcommonheads(ui, *repos)
636 setdiscovery.findcommonheads(ui, *repos)
636 timer(d, setup=s)
637 timer(d, setup=s)
637 fm.end()
638 fm.end()
638
639
639 @command(b'perfbookmarks', formatteropts +
640 @command(b'perfbookmarks', formatteropts +
640 [
641 [
641 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
642 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
642 ])
643 ])
643 def perfbookmarks(ui, repo, **opts):
644 def perfbookmarks(ui, repo, **opts):
644 """benchmark parsing bookmarks from disk to memory"""
645 """benchmark parsing bookmarks from disk to memory"""
645 opts = _byteskwargs(opts)
646 opts = _byteskwargs(opts)
646 timer, fm = gettimer(ui, opts)
647 timer, fm = gettimer(ui, opts)
647
648
648 clearrevlogs = opts[b'clear_revlogs']
649 clearrevlogs = opts[b'clear_revlogs']
649 def s():
650 def s():
650 if clearrevlogs:
651 if clearrevlogs:
651 clearchangelog(repo)
652 clearchangelog(repo)
652 clearfilecache(repo, b'_bookmarks')
653 clearfilecache(repo, b'_bookmarks')
653 def d():
654 def d():
654 repo._bookmarks
655 repo._bookmarks
655 timer(d, setup=s)
656 timer(d, setup=s)
656 fm.end()
657 fm.end()
657
658
658 @command(b'perfbundleread', formatteropts, b'BUNDLE')
659 @command(b'perfbundleread', formatteropts, b'BUNDLE')
659 def perfbundleread(ui, repo, bundlepath, **opts):
660 def perfbundleread(ui, repo, bundlepath, **opts):
660 """Benchmark reading of bundle files.
661 """Benchmark reading of bundle files.
661
662
662 This command is meant to isolate the I/O part of bundle reading as
663 This command is meant to isolate the I/O part of bundle reading as
663 much as possible.
664 much as possible.
664 """
665 """
665 from mercurial import (
666 from mercurial import (
666 bundle2,
667 bundle2,
667 exchange,
668 exchange,
668 streamclone,
669 streamclone,
669 )
670 )
670
671
671 opts = _byteskwargs(opts)
672 opts = _byteskwargs(opts)
672
673
673 def makebench(fn):
674 def makebench(fn):
674 def run():
675 def run():
675 with open(bundlepath, b'rb') as fh:
676 with open(bundlepath, b'rb') as fh:
676 bundle = exchange.readbundle(ui, fh, bundlepath)
677 bundle = exchange.readbundle(ui, fh, bundlepath)
677 fn(bundle)
678 fn(bundle)
678
679
679 return run
680 return run
680
681
681 def makereadnbytes(size):
682 def makereadnbytes(size):
682 def run():
683 def run():
683 with open(bundlepath, b'rb') as fh:
684 with open(bundlepath, b'rb') as fh:
684 bundle = exchange.readbundle(ui, fh, bundlepath)
685 bundle = exchange.readbundle(ui, fh, bundlepath)
685 while bundle.read(size):
686 while bundle.read(size):
686 pass
687 pass
687
688
688 return run
689 return run
689
690
690 def makestdioread(size):
691 def makestdioread(size):
691 def run():
692 def run():
692 with open(bundlepath, b'rb') as fh:
693 with open(bundlepath, b'rb') as fh:
693 while fh.read(size):
694 while fh.read(size):
694 pass
695 pass
695
696
696 return run
697 return run
697
698
698 # bundle1
699 # bundle1
699
700
700 def deltaiter(bundle):
701 def deltaiter(bundle):
701 for delta in bundle.deltaiter():
702 for delta in bundle.deltaiter():
702 pass
703 pass
703
704
704 def iterchunks(bundle):
705 def iterchunks(bundle):
705 for chunk in bundle.getchunks():
706 for chunk in bundle.getchunks():
706 pass
707 pass
707
708
708 # bundle2
709 # bundle2
709
710
710 def forwardchunks(bundle):
711 def forwardchunks(bundle):
711 for chunk in bundle._forwardchunks():
712 for chunk in bundle._forwardchunks():
712 pass
713 pass
713
714
714 def iterparts(bundle):
715 def iterparts(bundle):
715 for part in bundle.iterparts():
716 for part in bundle.iterparts():
716 pass
717 pass
717
718
718 def iterpartsseekable(bundle):
719 def iterpartsseekable(bundle):
719 for part in bundle.iterparts(seekable=True):
720 for part in bundle.iterparts(seekable=True):
720 pass
721 pass
721
722
722 def seek(bundle):
723 def seek(bundle):
723 for part in bundle.iterparts(seekable=True):
724 for part in bundle.iterparts(seekable=True):
724 part.seek(0, os.SEEK_END)
725 part.seek(0, os.SEEK_END)
725
726
726 def makepartreadnbytes(size):
727 def makepartreadnbytes(size):
727 def run():
728 def run():
728 with open(bundlepath, b'rb') as fh:
729 with open(bundlepath, b'rb') as fh:
729 bundle = exchange.readbundle(ui, fh, bundlepath)
730 bundle = exchange.readbundle(ui, fh, bundlepath)
730 for part in bundle.iterparts():
731 for part in bundle.iterparts():
731 while part.read(size):
732 while part.read(size):
732 pass
733 pass
733
734
734 return run
735 return run
735
736
736 benches = [
737 benches = [
737 (makestdioread(8192), b'read(8k)'),
738 (makestdioread(8192), b'read(8k)'),
738 (makestdioread(16384), b'read(16k)'),
739 (makestdioread(16384), b'read(16k)'),
739 (makestdioread(32768), b'read(32k)'),
740 (makestdioread(32768), b'read(32k)'),
740 (makestdioread(131072), b'read(128k)'),
741 (makestdioread(131072), b'read(128k)'),
741 ]
742 ]
742
743
743 with open(bundlepath, b'rb') as fh:
744 with open(bundlepath, b'rb') as fh:
744 bundle = exchange.readbundle(ui, fh, bundlepath)
745 bundle = exchange.readbundle(ui, fh, bundlepath)
745
746
746 if isinstance(bundle, changegroup.cg1unpacker):
747 if isinstance(bundle, changegroup.cg1unpacker):
747 benches.extend([
748 benches.extend([
748 (makebench(deltaiter), b'cg1 deltaiter()'),
749 (makebench(deltaiter), b'cg1 deltaiter()'),
749 (makebench(iterchunks), b'cg1 getchunks()'),
750 (makebench(iterchunks), b'cg1 getchunks()'),
750 (makereadnbytes(8192), b'cg1 read(8k)'),
751 (makereadnbytes(8192), b'cg1 read(8k)'),
751 (makereadnbytes(16384), b'cg1 read(16k)'),
752 (makereadnbytes(16384), b'cg1 read(16k)'),
752 (makereadnbytes(32768), b'cg1 read(32k)'),
753 (makereadnbytes(32768), b'cg1 read(32k)'),
753 (makereadnbytes(131072), b'cg1 read(128k)'),
754 (makereadnbytes(131072), b'cg1 read(128k)'),
754 ])
755 ])
755 elif isinstance(bundle, bundle2.unbundle20):
756 elif isinstance(bundle, bundle2.unbundle20):
756 benches.extend([
757 benches.extend([
757 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
758 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
758 (makebench(iterparts), b'bundle2 iterparts()'),
759 (makebench(iterparts), b'bundle2 iterparts()'),
759 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
760 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
760 (makebench(seek), b'bundle2 part seek()'),
761 (makebench(seek), b'bundle2 part seek()'),
761 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
762 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
762 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
763 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
763 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
764 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
764 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
765 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
765 ])
766 ])
766 elif isinstance(bundle, streamclone.streamcloneapplier):
767 elif isinstance(bundle, streamclone.streamcloneapplier):
767 raise error.Abort(b'stream clone bundles not supported')
768 raise error.Abort(b'stream clone bundles not supported')
768 else:
769 else:
769 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
770 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
770
771
771 for fn, title in benches:
772 for fn, title in benches:
772 timer, fm = gettimer(ui, opts)
773 timer, fm = gettimer(ui, opts)
773 timer(fn, title=title)
774 timer(fn, title=title)
774 fm.end()
775 fm.end()
775
776
776 @command(b'perfchangegroupchangelog', formatteropts +
777 @command(b'perfchangegroupchangelog', formatteropts +
777 [(b'', b'cgversion', b'02', b'changegroup version'),
778 [(b'', b'cgversion', b'02', b'changegroup version'),
778 (b'r', b'rev', b'', b'revisions to add to changegroup')])
779 (b'r', b'rev', b'', b'revisions to add to changegroup')])
779 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
780 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
780 """Benchmark producing a changelog group for a changegroup.
781 """Benchmark producing a changelog group for a changegroup.
781
782
782 This measures the time spent processing the changelog during a
783 This measures the time spent processing the changelog during a
783 bundle operation. This occurs during `hg bundle` and on a server
784 bundle operation. This occurs during `hg bundle` and on a server
784 processing a `getbundle` wire protocol request (handles clones
785 processing a `getbundle` wire protocol request (handles clones
785 and pull requests).
786 and pull requests).
786
787
787 By default, all revisions are added to the changegroup.
788 By default, all revisions are added to the changegroup.
788 """
789 """
789 opts = _byteskwargs(opts)
790 opts = _byteskwargs(opts)
790 cl = repo.changelog
791 cl = repo.changelog
791 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
792 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
792 bundler = changegroup.getbundler(cgversion, repo)
793 bundler = changegroup.getbundler(cgversion, repo)
793
794
794 def d():
795 def d():
795 state, chunks = bundler._generatechangelog(cl, nodes)
796 state, chunks = bundler._generatechangelog(cl, nodes)
796 for chunk in chunks:
797 for chunk in chunks:
797 pass
798 pass
798
799
799 timer, fm = gettimer(ui, opts)
800 timer, fm = gettimer(ui, opts)
800
801
801 # Terminal printing can interfere with timing. So disable it.
802 # Terminal printing can interfere with timing. So disable it.
802 with ui.configoverride({(b'progress', b'disable'): True}):
803 with ui.configoverride({(b'progress', b'disable'): True}):
803 timer(d)
804 timer(d)
804
805
805 fm.end()
806 fm.end()
806
807
807 @command(b'perfdirs', formatteropts)
808 @command(b'perfdirs', formatteropts)
808 def perfdirs(ui, repo, **opts):
809 def perfdirs(ui, repo, **opts):
809 opts = _byteskwargs(opts)
810 opts = _byteskwargs(opts)
810 timer, fm = gettimer(ui, opts)
811 timer, fm = gettimer(ui, opts)
811 dirstate = repo.dirstate
812 dirstate = repo.dirstate
812 b'a' in dirstate
813 b'a' in dirstate
813 def d():
814 def d():
814 dirstate.hasdir(b'a')
815 dirstate.hasdir(b'a')
815 del dirstate._map._dirs
816 del dirstate._map._dirs
816 timer(d)
817 timer(d)
817 fm.end()
818 fm.end()
818
819
819 @command(b'perfdirstate', formatteropts)
820 @command(b'perfdirstate', formatteropts)
820 def perfdirstate(ui, repo, **opts):
821 def perfdirstate(ui, repo, **opts):
821 opts = _byteskwargs(opts)
822 opts = _byteskwargs(opts)
822 timer, fm = gettimer(ui, opts)
823 timer, fm = gettimer(ui, opts)
823 b"a" in repo.dirstate
824 b"a" in repo.dirstate
824 def d():
825 def d():
825 repo.dirstate.invalidate()
826 repo.dirstate.invalidate()
826 b"a" in repo.dirstate
827 b"a" in repo.dirstate
827 timer(d)
828 timer(d)
828 fm.end()
829 fm.end()
829
830
830 @command(b'perfdirstatedirs', formatteropts)
831 @command(b'perfdirstatedirs', formatteropts)
831 def perfdirstatedirs(ui, repo, **opts):
832 def perfdirstatedirs(ui, repo, **opts):
832 opts = _byteskwargs(opts)
833 opts = _byteskwargs(opts)
833 timer, fm = gettimer(ui, opts)
834 timer, fm = gettimer(ui, opts)
834 b"a" in repo.dirstate
835 b"a" in repo.dirstate
835 def d():
836 def d():
836 repo.dirstate.hasdir(b"a")
837 repo.dirstate.hasdir(b"a")
837 del repo.dirstate._map._dirs
838 del repo.dirstate._map._dirs
838 timer(d)
839 timer(d)
839 fm.end()
840 fm.end()
840
841
841 @command(b'perfdirstatefoldmap', formatteropts)
842 @command(b'perfdirstatefoldmap', formatteropts)
842 def perfdirstatefoldmap(ui, repo, **opts):
843 def perfdirstatefoldmap(ui, repo, **opts):
843 opts = _byteskwargs(opts)
844 opts = _byteskwargs(opts)
844 timer, fm = gettimer(ui, opts)
845 timer, fm = gettimer(ui, opts)
845 dirstate = repo.dirstate
846 dirstate = repo.dirstate
846 b'a' in dirstate
847 b'a' in dirstate
847 def d():
848 def d():
848 dirstate._map.filefoldmap.get(b'a')
849 dirstate._map.filefoldmap.get(b'a')
849 del dirstate._map.filefoldmap
850 del dirstate._map.filefoldmap
850 timer(d)
851 timer(d)
851 fm.end()
852 fm.end()
852
853
853 @command(b'perfdirfoldmap', formatteropts)
854 @command(b'perfdirfoldmap', formatteropts)
854 def perfdirfoldmap(ui, repo, **opts):
855 def perfdirfoldmap(ui, repo, **opts):
855 opts = _byteskwargs(opts)
856 opts = _byteskwargs(opts)
856 timer, fm = gettimer(ui, opts)
857 timer, fm = gettimer(ui, opts)
857 dirstate = repo.dirstate
858 dirstate = repo.dirstate
858 b'a' in dirstate
859 b'a' in dirstate
859 def d():
860 def d():
860 dirstate._map.dirfoldmap.get(b'a')
861 dirstate._map.dirfoldmap.get(b'a')
861 del dirstate._map.dirfoldmap
862 del dirstate._map.dirfoldmap
862 del dirstate._map._dirs
863 del dirstate._map._dirs
863 timer(d)
864 timer(d)
864 fm.end()
865 fm.end()
865
866
866 @command(b'perfdirstatewrite', formatteropts)
867 @command(b'perfdirstatewrite', formatteropts)
867 def perfdirstatewrite(ui, repo, **opts):
868 def perfdirstatewrite(ui, repo, **opts):
868 opts = _byteskwargs(opts)
869 opts = _byteskwargs(opts)
869 timer, fm = gettimer(ui, opts)
870 timer, fm = gettimer(ui, opts)
870 ds = repo.dirstate
871 ds = repo.dirstate
871 b"a" in ds
872 b"a" in ds
872 def d():
873 def d():
873 ds._dirty = True
874 ds._dirty = True
874 ds.write(repo.currenttransaction())
875 ds.write(repo.currenttransaction())
875 timer(d)
876 timer(d)
876 fm.end()
877 fm.end()
877
878
878 @command(b'perfmergecalculate',
879 @command(b'perfmergecalculate',
879 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
880 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
880 def perfmergecalculate(ui, repo, rev, **opts):
881 def perfmergecalculate(ui, repo, rev, **opts):
881 opts = _byteskwargs(opts)
882 opts = _byteskwargs(opts)
882 timer, fm = gettimer(ui, opts)
883 timer, fm = gettimer(ui, opts)
883 wctx = repo[None]
884 wctx = repo[None]
884 rctx = scmutil.revsingle(repo, rev, rev)
885 rctx = scmutil.revsingle(repo, rev, rev)
885 ancestor = wctx.ancestor(rctx)
886 ancestor = wctx.ancestor(rctx)
886 # we don't want working dir files to be stat'd in the benchmark, so prime
887 # we don't want working dir files to be stat'd in the benchmark, so prime
887 # that cache
888 # that cache
888 wctx.dirty()
889 wctx.dirty()
889 def d():
890 def d():
890 # acceptremote is True because we don't want prompts in the middle of
891 # acceptremote is True because we don't want prompts in the middle of
891 # our benchmark
892 # our benchmark
892 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
893 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
893 acceptremote=True, followcopies=True)
894 acceptremote=True, followcopies=True)
894 timer(d)
895 timer(d)
895 fm.end()
896 fm.end()
896
897
897 @command(b'perfpathcopies', [], b"REV REV")
898 @command(b'perfpathcopies', [], b"REV REV")
898 def perfpathcopies(ui, repo, rev1, rev2, **opts):
899 def perfpathcopies(ui, repo, rev1, rev2, **opts):
899 """benchmark the copy tracing logic"""
900 """benchmark the copy tracing logic"""
900 opts = _byteskwargs(opts)
901 opts = _byteskwargs(opts)
901 timer, fm = gettimer(ui, opts)
902 timer, fm = gettimer(ui, opts)
902 ctx1 = scmutil.revsingle(repo, rev1, rev1)
903 ctx1 = scmutil.revsingle(repo, rev1, rev1)
903 ctx2 = scmutil.revsingle(repo, rev2, rev2)
904 ctx2 = scmutil.revsingle(repo, rev2, rev2)
904 def d():
905 def d():
905 copies.pathcopies(ctx1, ctx2)
906 copies.pathcopies(ctx1, ctx2)
906 timer(d)
907 timer(d)
907 fm.end()
908 fm.end()
908
909
909 @command(b'perfphases',
910 @command(b'perfphases',
910 [(b'', b'full', False, b'include file reading time too'),
911 [(b'', b'full', False, b'include file reading time too'),
911 ], b"")
912 ], b"")
912 def perfphases(ui, repo, **opts):
913 def perfphases(ui, repo, **opts):
913 """benchmark phasesets computation"""
914 """benchmark phasesets computation"""
914 opts = _byteskwargs(opts)
915 opts = _byteskwargs(opts)
915 timer, fm = gettimer(ui, opts)
916 timer, fm = gettimer(ui, opts)
916 _phases = repo._phasecache
917 _phases = repo._phasecache
917 full = opts.get(b'full')
918 full = opts.get(b'full')
918 def d():
919 def d():
919 phases = _phases
920 phases = _phases
920 if full:
921 if full:
921 clearfilecache(repo, b'_phasecache')
922 clearfilecache(repo, b'_phasecache')
922 phases = repo._phasecache
923 phases = repo._phasecache
923 phases.invalidate()
924 phases.invalidate()
924 phases.loadphaserevs(repo)
925 phases.loadphaserevs(repo)
925 timer(d)
926 timer(d)
926 fm.end()
927 fm.end()
927
928
928 @command(b'perfphasesremote',
929 @command(b'perfphasesremote',
929 [], b"[DEST]")
930 [], b"[DEST]")
930 def perfphasesremote(ui, repo, dest=None, **opts):
931 def perfphasesremote(ui, repo, dest=None, **opts):
931 """benchmark time needed to analyse phases of the remote server"""
932 """benchmark time needed to analyse phases of the remote server"""
932 from mercurial.node import (
933 from mercurial.node import (
933 bin,
934 bin,
934 )
935 )
935 from mercurial import (
936 from mercurial import (
936 exchange,
937 exchange,
937 hg,
938 hg,
938 phases,
939 phases,
939 )
940 )
940 opts = _byteskwargs(opts)
941 opts = _byteskwargs(opts)
941 timer, fm = gettimer(ui, opts)
942 timer, fm = gettimer(ui, opts)
942
943
943 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
944 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
944 if not path:
945 if not path:
945 raise error.Abort((b'default repository not configured!'),
946 raise error.Abort((b'default repository not configured!'),
946 hint=(b"see 'hg help config.paths'"))
947 hint=(b"see 'hg help config.paths'"))
947 dest = path.pushloc or path.loc
948 dest = path.pushloc or path.loc
948 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
949 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
949 other = hg.peer(repo, opts, dest)
950 other = hg.peer(repo, opts, dest)
950
951
951 # easier to perform discovery through the operation
952 # easier to perform discovery through the operation
952 op = exchange.pushoperation(repo, other)
953 op = exchange.pushoperation(repo, other)
953 exchange._pushdiscoverychangeset(op)
954 exchange._pushdiscoverychangeset(op)
954
955
955 remotesubset = op.fallbackheads
956 remotesubset = op.fallbackheads
956
957
957 with other.commandexecutor() as e:
958 with other.commandexecutor() as e:
958 remotephases = e.callcommand(b'listkeys',
959 remotephases = e.callcommand(b'listkeys',
959 {b'namespace': b'phases'}).result()
960 {b'namespace': b'phases'}).result()
960 del other
961 del other
961 publishing = remotephases.get(b'publishing', False)
962 publishing = remotephases.get(b'publishing', False)
962 if publishing:
963 if publishing:
963 ui.status((b'publishing: yes\n'))
964 ui.status((b'publishing: yes\n'))
964 else:
965 else:
965 ui.status((b'publishing: no\n'))
966 ui.status((b'publishing: no\n'))
966
967
967 nodemap = repo.changelog.nodemap
968 nodemap = repo.changelog.nodemap
968 nonpublishroots = 0
969 nonpublishroots = 0
969 for nhex, phase in remotephases.iteritems():
970 for nhex, phase in remotephases.iteritems():
970 if nhex == b'publishing': # ignore data related to publish option
971 if nhex == b'publishing': # ignore data related to publish option
971 continue
972 continue
972 node = bin(nhex)
973 node = bin(nhex)
973 if node in nodemap and int(phase):
974 if node in nodemap and int(phase):
974 nonpublishroots += 1
975 nonpublishroots += 1
975 ui.status((b'number of roots: %d\n') % len(remotephases))
976 ui.status((b'number of roots: %d\n') % len(remotephases))
976 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
977 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
977 def d():
978 def d():
978 phases.remotephasessummary(repo,
979 phases.remotephasessummary(repo,
979 remotesubset,
980 remotesubset,
980 remotephases)
981 remotephases)
981 timer(d)
982 timer(d)
982 fm.end()
983 fm.end()
983
984
984 @command(b'perfmanifest',[
985 @command(b'perfmanifest',[
985 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
986 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
986 (b'', b'clear-disk', False, b'clear on-disk caches too'),
987 (b'', b'clear-disk', False, b'clear on-disk caches too'),
987 ] + formatteropts, b'REV|NODE')
988 ] + formatteropts, b'REV|NODE')
988 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
989 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
989 """benchmark the time to read a manifest from disk and return a usable
990 """benchmark the time to read a manifest from disk and return a usable
990 dict-like object
991 dict-like object
991
992
992 Manifest caches are cleared before retrieval."""
993 Manifest caches are cleared before retrieval."""
993 opts = _byteskwargs(opts)
994 opts = _byteskwargs(opts)
994 timer, fm = gettimer(ui, opts)
995 timer, fm = gettimer(ui, opts)
995 if not manifest_rev:
996 if not manifest_rev:
996 ctx = scmutil.revsingle(repo, rev, rev)
997 ctx = scmutil.revsingle(repo, rev, rev)
997 t = ctx.manifestnode()
998 t = ctx.manifestnode()
998 else:
999 else:
999 from mercurial.node import bin
1000 from mercurial.node import bin
1000
1001
1001 if len(rev) == 40:
1002 if len(rev) == 40:
1002 t = bin(rev)
1003 t = bin(rev)
1003 else:
1004 else:
1004 try:
1005 try:
1005 rev = int(rev)
1006 rev = int(rev)
1006
1007
1007 if util.safehasattr(repo.manifestlog, b'getstorage'):
1008 if util.safehasattr(repo.manifestlog, b'getstorage'):
1008 t = repo.manifestlog.getstorage(b'').node(rev)
1009 t = repo.manifestlog.getstorage(b'').node(rev)
1009 else:
1010 else:
1010 t = repo.manifestlog._revlog.lookup(rev)
1011 t = repo.manifestlog._revlog.lookup(rev)
1011 except ValueError:
1012 except ValueError:
1012 raise error.Abort(b'manifest revision must be integer or full '
1013 raise error.Abort(b'manifest revision must be integer or full '
1013 b'node')
1014 b'node')
1014 def d():
1015 def d():
1015 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1016 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1016 repo.manifestlog[t].read()
1017 repo.manifestlog[t].read()
1017 timer(d)
1018 timer(d)
1018 fm.end()
1019 fm.end()
1019
1020
1020 @command(b'perfchangeset', formatteropts)
1021 @command(b'perfchangeset', formatteropts)
1021 def perfchangeset(ui, repo, rev, **opts):
1022 def perfchangeset(ui, repo, rev, **opts):
1022 opts = _byteskwargs(opts)
1023 opts = _byteskwargs(opts)
1023 timer, fm = gettimer(ui, opts)
1024 timer, fm = gettimer(ui, opts)
1024 n = scmutil.revsingle(repo, rev).node()
1025 n = scmutil.revsingle(repo, rev).node()
1025 def d():
1026 def d():
1026 repo.changelog.read(n)
1027 repo.changelog.read(n)
1027 #repo.changelog._cache = None
1028 #repo.changelog._cache = None
1028 timer(d)
1029 timer(d)
1029 fm.end()
1030 fm.end()
1030
1031
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # drop both the dirstate content and the cached ignore matcher so
        # each timed run rebuilds the ignore data from scratch
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        # property access triggers (re)computation of the ignore matcher
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1047
1048
@command(b'perfindex', [
         (b'', b'rev', [], b'revision to be looked up (default tip)'),
         (b'', b'no-lookup', None, b'do not revision lookup post creation'),
        ] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # fix: _byteskwargs converted all keys to bytes, so the key (and the
        # Abort message, for consistency with the rest of the file) must use
        # bytes literals
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)
    timer(d, setup=setup)
    fm.end()
1101
1102
@command(b'perfnodemap', [
         (b'', b'rev', [], b'revision to be looked up (default tip)'),
         (b'', b'clear-caches', True, b'clear revlog cache between calls'),
        ] + formatteropts)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # fix: after _byteskwargs the option keys are bytes, so a str key would
    # raise KeyError on Python 3
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]
    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        def setup():
            setnodeget()
    else:
        setnodeget()
        d() # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1160
1161
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the time it takes to spawn `hg version`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        if os.name != r'nt':
            # blank HGRCPATH so user configuration does not skew the timing
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
        else:
            # Windows: environment must be set separately, NUL is the null device
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
    timer(d)
    fm.end()
1174
1175
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layer
    from the repository object. The N first revision will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    # unfiltered repo avoids timing the revision-filtering layer
    repo = repo.unfiltered()
    nl = [repo.changelog.node(i) for i in _xrange(count)]
    def d():
        for n in nl:
            repo.changelog.parents(n)
    timer(d)
    fm.end()
1198
1199
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark fetching the file list of a changeset through the context API"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    def d():
        len(repo[x].files())
    timer(d)
    fm.end()
1208
1209
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark fetching the file list of a changeset from the raw changelog"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        # changelog.read() returns a tuple; index 3 is the files list
        len(cl.read(x)[3])
    timer(d)
    fm.end()
1219
1220
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()
1226
1227
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
         ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark applying a stream of random edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed so every run (and every timed iteration) sees the same edits
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        # a1..a2 is the replaced range, b1..b2 the replacement range
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1260
1261
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()
1268
1269
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark a cold node-to-rev lookup on a freshly opened changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    # open the changelog revlog directly, bypassing repository caches
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(n)
        # drop the revlog caches so every iteration does a cold lookup
        clearcaches(cl)
    timer(d)
    fm.end()
1282
1283
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running `hg log`, optionally following renames"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev=[]
    timer, fm = gettimer(ui, opts)
    # swallow the command output so printing does not dominate the timing
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
                               copies=opts.get(b'rename')))
    ui.popbuffer()
    fm.end()
1296
1297
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            ctx.branch() # read changelog data (in addition to the index)
    timer(moonwalk)
    fm.end()
1311
1312
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render into a throw-away ui so output handling does not skew timing
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1345
1346
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
         ])
def perfhelperpathcopies(ui, repo, revs=None, **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    # fix: the default used to be a mutable `revs=[]`; use None as sentinel.
    # b'all()' matches the bytes revset literal used by perftemplating.
    if not revs:
        revs = [b'all()']
    revs = scmutil.revrange(repo, revs)

    # revisions of interest: merges within the requested range
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
1421
1422
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor for the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
    fm.end()
1428
1429
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    def d():
        s.fncache._load()
    timer(d)
    fm.end()
1438
1439
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache file inside a transaction"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    # back up the fncache so the repository is left untouched afterwards
    tr.addbackup(b'fncache')
    def d():
        # force the dirty flag, otherwise write() would be a no-op
        s.fncache._dirty = True
        s.fncache.write(tr)
    timer(d)
    tr.close()
    lock.release()
    fm.end()
1455
1456
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark store-path encoding of every fncache entry"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    # load outside the timer so only the encoding itself is measured
    s.fncache._load()
    def d():
        for p in s.fncache.entries:
            s.encode(p)
    timer(d)
    fm.end()
1467
1468
def _bdiffworker(q, blocks, xdiff, ready, done):
    """worker loop for the threaded variant of perfbdiff.

    Consumes text pairs from queue ``q`` and diffs them with the algorithm
    selected by the ``xdiff``/``blocks`` flags. A ``None`` item marks the end
    of one timed batch; the worker then waits on ``ready`` until the driver
    releases it for the next batch (or sets ``done`` to terminate).
    """
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()
1483
1484
def _manifestrevision(repo, mnode):
    """return the raw manifest text for manifest node ``mnode``.

    Handles both modern manifestlog objects (``getstorage``) and older ones
    that expose the revlog directly (``_revlog``), for "historical
    portability" across Mercurial versions.
    """
    ml = repo.manifestlog

    if util.safehasattr(ml, b'getstorage'):
        store = ml.getstorage(b'')
    else:
        store = ml._revlog

    return store.revision(mnode)
1493
1494
1494 @command(b'perfbdiff', revlogopts + formatteropts + [
1495 @command(b'perfbdiff', revlogopts + formatteropts + [
1495 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1496 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1496 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1497 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1497 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
1498 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
1498 (b'', b'blocks', False, b'test computing diffs into blocks'),
1499 (b'', b'blocks', False, b'test computing diffs into blocks'),
1499 (b'', b'xdiff', False, b'use xdiff algorithm'),
1500 (b'', b'xdiff', False, b'use xdiff algorithm'),
1500 ],
1501 ],
1501
1502
1502 b'-c|-m|FILE REV')
1503 b'-c|-m|FILE REV')
1503 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1504 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1504 """benchmark a bdiff between revisions
1505 """benchmark a bdiff between revisions
1505
1506
1506 By default, benchmark a bdiff between its delta parent and itself.
1507 By default, benchmark a bdiff between its delta parent and itself.
1507
1508
1508 With ``--count``, benchmark bdiffs between delta parents and self for N
1509 With ``--count``, benchmark bdiffs between delta parents and self for N
1509 revisions starting at the specified revision.
1510 revisions starting at the specified revision.
1510
1511
1511 With ``--alldata``, assume the requested revision is a changeset and
1512 With ``--alldata``, assume the requested revision is a changeset and
1512 measure bdiffs for all changes related to that changeset (manifest
1513 measure bdiffs for all changes related to that changeset (manifest
1513 and filelogs).
1514 and filelogs).
1514 """
1515 """
1515 opts = _byteskwargs(opts)
1516 opts = _byteskwargs(opts)
1516
1517
1517 if opts[b'xdiff'] and not opts[b'blocks']:
1518 if opts[b'xdiff'] and not opts[b'blocks']:
1518 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1519 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1519
1520
1520 if opts[b'alldata']:
1521 if opts[b'alldata']:
1521 opts[b'changelog'] = True
1522 opts[b'changelog'] = True
1522
1523
1523 if opts.get(b'changelog') or opts.get(b'manifest'):
1524 if opts.get(b'changelog') or opts.get(b'manifest'):
1524 file_, rev = None, file_
1525 file_, rev = None, file_
1525 elif rev is None:
1526 elif rev is None:
1526 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1527 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1527
1528
1528 blocks = opts[b'blocks']
1529 blocks = opts[b'blocks']
1529 xdiff = opts[b'xdiff']
1530 xdiff = opts[b'xdiff']
1530 textpairs = []
1531 textpairs = []
1531
1532
1532 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1533 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1533
1534
1534 startrev = r.rev(r.lookup(rev))
1535 startrev = r.rev(r.lookup(rev))
1535 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1536 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1536 if opts[b'alldata']:
1537 if opts[b'alldata']:
1537 # Load revisions associated with changeset.
1538 # Load revisions associated with changeset.
1538 ctx = repo[rev]
1539 ctx = repo[rev]
1539 mtext = _manifestrevision(repo, ctx.manifestnode())
1540 mtext = _manifestrevision(repo, ctx.manifestnode())
1540 for pctx in ctx.parents():
1541 for pctx in ctx.parents():
1541 pman = _manifestrevision(repo, pctx.manifestnode())
1542 pman = _manifestrevision(repo, pctx.manifestnode())
1542 textpairs.append((pman, mtext))
1543 textpairs.append((pman, mtext))
1543
1544
1544 # Load filelog revisions by iterating manifest delta.
1545 # Load filelog revisions by iterating manifest delta.
1545 man = ctx.manifest()
1546 man = ctx.manifest()
1546 pman = ctx.p1().manifest()
1547 pman = ctx.p1().manifest()
1547 for filename, change in pman.diff(man).items():
1548 for filename, change in pman.diff(man).items():
1548 fctx = repo.file(filename)
1549 fctx = repo.file(filename)
1549 f1 = fctx.revision(change[0][0] or -1)
1550 f1 = fctx.revision(change[0][0] or -1)
1550 f2 = fctx.revision(change[1][0] or -1)
1551 f2 = fctx.revision(change[1][0] or -1)
1551 textpairs.append((f1, f2))
1552 textpairs.append((f1, f2))
1552 else:
1553 else:
1553 dp = r.deltaparent(rev)
1554 dp = r.deltaparent(rev)
1554 textpairs.append((r.revision(dp), r.revision(rev)))
1555 textpairs.append((r.revision(dp), r.revision(rev)))
1555
1556
1556 withthreads = threads > 0
1557 withthreads = threads > 0
1557 if not withthreads:
1558 if not withthreads:
1558 def d():
1559 def d():
1559 for pair in textpairs:
1560 for pair in textpairs:
1560 if xdiff:
1561 if xdiff:
1561 mdiff.bdiff.xdiffblocks(*pair)
1562 mdiff.bdiff.xdiffblocks(*pair)
1562 elif blocks:
1563 elif blocks:
1563 mdiff.bdiff.blocks(*pair)
1564 mdiff.bdiff.blocks(*pair)
1564 else:
1565 else:
1565 mdiff.textdiff(*pair)
1566 mdiff.textdiff(*pair)
1566 else:
1567 else:
1567 q = queue()
1568 q = queue()
1568 for i in _xrange(threads):
1569 for i in _xrange(threads):
1569 q.put(None)
1570 q.put(None)
1570 ready = threading.Condition()
1571 ready = threading.Condition()
1571 done = threading.Event()
1572 done = threading.Event()
1572 for i in _xrange(threads):
1573 for i in _xrange(threads):
1573 threading.Thread(target=_bdiffworker,
1574 threading.Thread(target=_bdiffworker,
1574 args=(q, blocks, xdiff, ready, done)).start()
1575 args=(q, blocks, xdiff, ready, done)).start()
1575 q.join()
1576 q.join()
1576 def d():
1577 def d():
1577 for pair in textpairs:
1578 for pair in textpairs:
1578 q.put(pair)
1579 q.put(pair)
1579 for i in _xrange(threads):
1580 for i in _xrange(threads):
1580 q.put(None)
1581 q.put(None)
1581 with ready:
1582 with ready:
1582 ready.notify_all()
1583 ready.notify_all()
1583 q.join()
1584 q.join()
1584 timer, fm = gettimer(ui, opts)
1585 timer, fm = gettimer(ui, opts)
1585 timer(d)
1586 timer(d)
1586 fm.end()
1587 fm.end()
1587
1588
1588 if withthreads:
1589 if withthreads:
1589 done.set()
1590 done.set()
1590 for i in _xrange(threads):
1591 for i in _xrange(threads):
1591 q.put(None)
1592 q.put(None)
1592 with ready:
1593 with ready:
1593 ready.notify_all()
1594 ready.notify_all()
1594
1595
1595 @command(b'perfunidiff', revlogopts + formatteropts + [
1596 @command(b'perfunidiff', revlogopts + formatteropts + [
1596 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1597 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1597 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1598 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1598 ], b'-c|-m|FILE REV')
1599 ], b'-c|-m|FILE REV')
1599 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1600 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1600 """benchmark a unified diff between revisions
1601 """benchmark a unified diff between revisions
1601
1602
1602 This doesn't include any copy tracing - it's just a unified diff
1603 This doesn't include any copy tracing - it's just a unified diff
1603 of the texts.
1604 of the texts.
1604
1605
1605 By default, benchmark a diff between its delta parent and itself.
1606 By default, benchmark a diff between its delta parent and itself.
1606
1607
1607 With ``--count``, benchmark diffs between delta parents and self for N
1608 With ``--count``, benchmark diffs between delta parents and self for N
1608 revisions starting at the specified revision.
1609 revisions starting at the specified revision.
1609
1610
1610 With ``--alldata``, assume the requested revision is a changeset and
1611 With ``--alldata``, assume the requested revision is a changeset and
1611 measure diffs for all changes related to that changeset (manifest
1612 measure diffs for all changes related to that changeset (manifest
1612 and filelogs).
1613 and filelogs).
1613 """
1614 """
1614 opts = _byteskwargs(opts)
1615 opts = _byteskwargs(opts)
1615 if opts[b'alldata']:
1616 if opts[b'alldata']:
1616 opts[b'changelog'] = True
1617 opts[b'changelog'] = True
1617
1618
1618 if opts.get(b'changelog') or opts.get(b'manifest'):
1619 if opts.get(b'changelog') or opts.get(b'manifest'):
1619 file_, rev = None, file_
1620 file_, rev = None, file_
1620 elif rev is None:
1621 elif rev is None:
1621 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1622 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1622
1623
1623 textpairs = []
1624 textpairs = []
1624
1625
1625 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1626 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1626
1627
1627 startrev = r.rev(r.lookup(rev))
1628 startrev = r.rev(r.lookup(rev))
1628 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1629 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1629 if opts[b'alldata']:
1630 if opts[b'alldata']:
1630 # Load revisions associated with changeset.
1631 # Load revisions associated with changeset.
1631 ctx = repo[rev]
1632 ctx = repo[rev]
1632 mtext = _manifestrevision(repo, ctx.manifestnode())
1633 mtext = _manifestrevision(repo, ctx.manifestnode())
1633 for pctx in ctx.parents():
1634 for pctx in ctx.parents():
1634 pman = _manifestrevision(repo, pctx.manifestnode())
1635 pman = _manifestrevision(repo, pctx.manifestnode())
1635 textpairs.append((pman, mtext))
1636 textpairs.append((pman, mtext))
1636
1637
1637 # Load filelog revisions by iterating manifest delta.
1638 # Load filelog revisions by iterating manifest delta.
1638 man = ctx.manifest()
1639 man = ctx.manifest()
1639 pman = ctx.p1().manifest()
1640 pman = ctx.p1().manifest()
1640 for filename, change in pman.diff(man).items():
1641 for filename, change in pman.diff(man).items():
1641 fctx = repo.file(filename)
1642 fctx = repo.file(filename)
1642 f1 = fctx.revision(change[0][0] or -1)
1643 f1 = fctx.revision(change[0][0] or -1)
1643 f2 = fctx.revision(change[1][0] or -1)
1644 f2 = fctx.revision(change[1][0] or -1)
1644 textpairs.append((f1, f2))
1645 textpairs.append((f1, f2))
1645 else:
1646 else:
1646 dp = r.deltaparent(rev)
1647 dp = r.deltaparent(rev)
1647 textpairs.append((r.revision(dp), r.revision(rev)))
1648 textpairs.append((r.revision(dp), r.revision(rev)))
1648
1649
1649 def d():
1650 def d():
1650 for left, right in textpairs:
1651 for left, right in textpairs:
1651 # The date strings don't matter, so we pass empty strings.
1652 # The date strings don't matter, so we pass empty strings.
1652 headerlines, hunks = mdiff.unidiff(
1653 headerlines, hunks = mdiff.unidiff(
1653 left, b'', right, b'', b'left', b'right', binary=False)
1654 left, b'', right, b'', b'left', b'right', binary=False)
1654 # consume iterators in roughly the way patch.py does
1655 # consume iterators in roughly the way patch.py does
1655 b'\n'.join(headerlines)
1656 b'\n'.join(headerlines)
1656 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1657 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1657 timer, fm = gettimer(ui, opts)
1658 timer, fm = gettimer(ui, opts)
1658 timer(d)
1659 timer(d)
1659 fm.end()
1660 fm.end()
1660
1661
1661 @command(b'perfdiffwd', formatteropts)
1662 @command(b'perfdiffwd', formatteropts)
1662 def perfdiffwd(ui, repo, **opts):
1663 def perfdiffwd(ui, repo, **opts):
1663 """Profile diff of working directory changes"""
1664 """Profile diff of working directory changes"""
1664 opts = _byteskwargs(opts)
1665 opts = _byteskwargs(opts)
1665 timer, fm = gettimer(ui, opts)
1666 timer, fm = gettimer(ui, opts)
1666 options = {
1667 options = {
1667 'w': 'ignore_all_space',
1668 'w': 'ignore_all_space',
1668 'b': 'ignore_space_change',
1669 'b': 'ignore_space_change',
1669 'B': 'ignore_blank_lines',
1670 'B': 'ignore_blank_lines',
1670 }
1671 }
1671
1672
1672 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1673 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1673 opts = dict((options[c], b'1') for c in diffopt)
1674 opts = dict((options[c], b'1') for c in diffopt)
1674 def d():
1675 def d():
1675 ui.pushbuffer()
1676 ui.pushbuffer()
1676 commands.diff(ui, repo, **opts)
1677 commands.diff(ui, repo, **opts)
1677 ui.popbuffer()
1678 ui.popbuffer()
1678 diffopt = diffopt.encode('ascii')
1679 diffopt = diffopt.encode('ascii')
1679 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1680 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1680 timer(d, title=title)
1681 timer(d, title=title)
1681 fm.end()
1682 fm.end()
1682
1683
1683 @command(b'perfrevlogindex', revlogopts + formatteropts,
1684 @command(b'perfrevlogindex', revlogopts + formatteropts,
1684 b'-c|-m|FILE')
1685 b'-c|-m|FILE')
1685 def perfrevlogindex(ui, repo, file_=None, **opts):
1686 def perfrevlogindex(ui, repo, file_=None, **opts):
1686 """Benchmark operations against a revlog index.
1687 """Benchmark operations against a revlog index.
1687
1688
1688 This tests constructing a revlog instance, reading index data,
1689 This tests constructing a revlog instance, reading index data,
1689 parsing index data, and performing various operations related to
1690 parsing index data, and performing various operations related to
1690 index data.
1691 index data.
1691 """
1692 """
1692
1693
1693 opts = _byteskwargs(opts)
1694 opts = _byteskwargs(opts)
1694
1695
1695 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1696 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1696
1697
1697 opener = getattr(rl, 'opener') # trick linter
1698 opener = getattr(rl, 'opener') # trick linter
1698 indexfile = rl.indexfile
1699 indexfile = rl.indexfile
1699 data = opener.read(indexfile)
1700 data = opener.read(indexfile)
1700
1701
1701 header = struct.unpack(b'>I', data[0:4])[0]
1702 header = struct.unpack(b'>I', data[0:4])[0]
1702 version = header & 0xFFFF
1703 version = header & 0xFFFF
1703 if version == 1:
1704 if version == 1:
1704 revlogio = revlog.revlogio()
1705 revlogio = revlog.revlogio()
1705 inline = header & (1 << 16)
1706 inline = header & (1 << 16)
1706 else:
1707 else:
1707 raise error.Abort((b'unsupported revlog version: %d') % version)
1708 raise error.Abort((b'unsupported revlog version: %d') % version)
1708
1709
1709 rllen = len(rl)
1710 rllen = len(rl)
1710
1711
1711 node0 = rl.node(0)
1712 node0 = rl.node(0)
1712 node25 = rl.node(rllen // 4)
1713 node25 = rl.node(rllen // 4)
1713 node50 = rl.node(rllen // 2)
1714 node50 = rl.node(rllen // 2)
1714 node75 = rl.node(rllen // 4 * 3)
1715 node75 = rl.node(rllen // 4 * 3)
1715 node100 = rl.node(rllen - 1)
1716 node100 = rl.node(rllen - 1)
1716
1717
1717 allrevs = range(rllen)
1718 allrevs = range(rllen)
1718 allrevsrev = list(reversed(allrevs))
1719 allrevsrev = list(reversed(allrevs))
1719 allnodes = [rl.node(rev) for rev in range(rllen)]
1720 allnodes = [rl.node(rev) for rev in range(rllen)]
1720 allnodesrev = list(reversed(allnodes))
1721 allnodesrev = list(reversed(allnodes))
1721
1722
1722 def constructor():
1723 def constructor():
1723 revlog.revlog(opener, indexfile)
1724 revlog.revlog(opener, indexfile)
1724
1725
1725 def read():
1726 def read():
1726 with opener(indexfile) as fh:
1727 with opener(indexfile) as fh:
1727 fh.read()
1728 fh.read()
1728
1729
1729 def parseindex():
1730 def parseindex():
1730 revlogio.parseindex(data, inline)
1731 revlogio.parseindex(data, inline)
1731
1732
1732 def getentry(revornode):
1733 def getentry(revornode):
1733 index = revlogio.parseindex(data, inline)[0]
1734 index = revlogio.parseindex(data, inline)[0]
1734 index[revornode]
1735 index[revornode]
1735
1736
1736 def getentries(revs, count=1):
1737 def getentries(revs, count=1):
1737 index = revlogio.parseindex(data, inline)[0]
1738 index = revlogio.parseindex(data, inline)[0]
1738
1739
1739 for i in range(count):
1740 for i in range(count):
1740 for rev in revs:
1741 for rev in revs:
1741 index[rev]
1742 index[rev]
1742
1743
1743 def resolvenode(node):
1744 def resolvenode(node):
1744 nodemap = revlogio.parseindex(data, inline)[1]
1745 nodemap = revlogio.parseindex(data, inline)[1]
1745 # This only works for the C code.
1746 # This only works for the C code.
1746 if nodemap is None:
1747 if nodemap is None:
1747 return
1748 return
1748
1749
1749 try:
1750 try:
1750 nodemap[node]
1751 nodemap[node]
1751 except error.RevlogError:
1752 except error.RevlogError:
1752 pass
1753 pass
1753
1754
1754 def resolvenodes(nodes, count=1):
1755 def resolvenodes(nodes, count=1):
1755 nodemap = revlogio.parseindex(data, inline)[1]
1756 nodemap = revlogio.parseindex(data, inline)[1]
1756 if nodemap is None:
1757 if nodemap is None:
1757 return
1758 return
1758
1759
1759 for i in range(count):
1760 for i in range(count):
1760 for node in nodes:
1761 for node in nodes:
1761 try:
1762 try:
1762 nodemap[node]
1763 nodemap[node]
1763 except error.RevlogError:
1764 except error.RevlogError:
1764 pass
1765 pass
1765
1766
1766 benches = [
1767 benches = [
1767 (constructor, b'revlog constructor'),
1768 (constructor, b'revlog constructor'),
1768 (read, b'read'),
1769 (read, b'read'),
1769 (parseindex, b'create index object'),
1770 (parseindex, b'create index object'),
1770 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1771 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1771 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1772 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1772 (lambda: resolvenode(node0), b'look up node at rev 0'),
1773 (lambda: resolvenode(node0), b'look up node at rev 0'),
1773 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1774 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1774 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1775 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1775 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1776 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1776 (lambda: resolvenode(node100), b'look up node at tip'),
1777 (lambda: resolvenode(node100), b'look up node at tip'),
1777 # 2x variation is to measure caching impact.
1778 # 2x variation is to measure caching impact.
1778 (lambda: resolvenodes(allnodes),
1779 (lambda: resolvenodes(allnodes),
1779 b'look up all nodes (forward)'),
1780 b'look up all nodes (forward)'),
1780 (lambda: resolvenodes(allnodes, 2),
1781 (lambda: resolvenodes(allnodes, 2),
1781 b'look up all nodes 2x (forward)'),
1782 b'look up all nodes 2x (forward)'),
1782 (lambda: resolvenodes(allnodesrev),
1783 (lambda: resolvenodes(allnodesrev),
1783 b'look up all nodes (reverse)'),
1784 b'look up all nodes (reverse)'),
1784 (lambda: resolvenodes(allnodesrev, 2),
1785 (lambda: resolvenodes(allnodesrev, 2),
1785 b'look up all nodes 2x (reverse)'),
1786 b'look up all nodes 2x (reverse)'),
1786 (lambda: getentries(allrevs),
1787 (lambda: getentries(allrevs),
1787 b'retrieve all index entries (forward)'),
1788 b'retrieve all index entries (forward)'),
1788 (lambda: getentries(allrevs, 2),
1789 (lambda: getentries(allrevs, 2),
1789 b'retrieve all index entries 2x (forward)'),
1790 b'retrieve all index entries 2x (forward)'),
1790 (lambda: getentries(allrevsrev),
1791 (lambda: getentries(allrevsrev),
1791 b'retrieve all index entries (reverse)'),
1792 b'retrieve all index entries (reverse)'),
1792 (lambda: getentries(allrevsrev, 2),
1793 (lambda: getentries(allrevsrev, 2),
1793 b'retrieve all index entries 2x (reverse)'),
1794 b'retrieve all index entries 2x (reverse)'),
1794 ]
1795 ]
1795
1796
1796 for fn, title in benches:
1797 for fn, title in benches:
1797 timer, fm = gettimer(ui, opts)
1798 timer, fm = gettimer(ui, opts)
1798 timer(fn, title=title)
1799 timer(fn, title=title)
1799 fm.end()
1800 fm.end()
1800
1801
1801 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1802 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1802 [(b'd', b'dist', 100, b'distance between the revisions'),
1803 [(b'd', b'dist', 100, b'distance between the revisions'),
1803 (b's', b'startrev', 0, b'revision to start reading at'),
1804 (b's', b'startrev', 0, b'revision to start reading at'),
1804 (b'', b'reverse', False, b'read in reverse')],
1805 (b'', b'reverse', False, b'read in reverse')],
1805 b'-c|-m|FILE')
1806 b'-c|-m|FILE')
1806 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1807 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1807 **opts):
1808 **opts):
1808 """Benchmark reading a series of revisions from a revlog.
1809 """Benchmark reading a series of revisions from a revlog.
1809
1810
1810 By default, we read every ``-d/--dist`` revision from 0 to tip of
1811 By default, we read every ``-d/--dist`` revision from 0 to tip of
1811 the specified revlog.
1812 the specified revlog.
1812
1813
1813 The start revision can be defined via ``-s/--startrev``.
1814 The start revision can be defined via ``-s/--startrev``.
1814 """
1815 """
1815 opts = _byteskwargs(opts)
1816 opts = _byteskwargs(opts)
1816
1817
1817 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1818 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1818 rllen = getlen(ui)(rl)
1819 rllen = getlen(ui)(rl)
1819
1820
1820 if startrev < 0:
1821 if startrev < 0:
1821 startrev = rllen + startrev
1822 startrev = rllen + startrev
1822
1823
1823 def d():
1824 def d():
1824 rl.clearcaches()
1825 rl.clearcaches()
1825
1826
1826 beginrev = startrev
1827 beginrev = startrev
1827 endrev = rllen
1828 endrev = rllen
1828 dist = opts[b'dist']
1829 dist = opts[b'dist']
1829
1830
1830 if reverse:
1831 if reverse:
1831 beginrev, endrev = endrev - 1, beginrev - 1
1832 beginrev, endrev = endrev - 1, beginrev - 1
1832 dist = -1 * dist
1833 dist = -1 * dist
1833
1834
1834 for x in _xrange(beginrev, endrev, dist):
1835 for x in _xrange(beginrev, endrev, dist):
1835 # Old revisions don't support passing int.
1836 # Old revisions don't support passing int.
1836 n = rl.node(x)
1837 n = rl.node(x)
1837 rl.revision(n)
1838 rl.revision(n)
1838
1839
1839 timer, fm = gettimer(ui, opts)
1840 timer, fm = gettimer(ui, opts)
1840 timer(d)
1841 timer(d)
1841 fm.end()
1842 fm.end()
1842
1843
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          (b'', b'count', 3, b'last revision to write'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
          ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # Negative revisions count back from the end of the revlog.
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # fix: error message previously read "invalide run count"
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    # Each entry becomes (rev, [timing from run 1, run 2, ...]); all runs
    # must have visited the same revisions in the same order.
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # BUG FIX: the median row was computed as `resultcount * 70 // 100`,
        # which reported the 70th percentile under the "50%" label.
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
1950
1951
class _faketr(object):
    """Minimal transaction stand-in whose journal hook is a no-op."""
    def add(s, x, y, z=None):
        return None
1954
1955
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    """Replay revisions [startrev, stoprev] of `orig` into a throwaway
    revlog, timing each addrawrevision call.

    Returns a list of (rev, timing) pairs, one per replayed revision.
    """
    measured = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            def updateprogress(pos):
                progress.update(pos)
            def completeprogress():
                progress.complete()
        else:
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # Keep every write cache-cold so runs are comparable.
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as timing:
                dest.addrawrevision(*addargs, **addkwargs)
            measured.append((rev, timing[0]))
        updateprogress(total)
        completeprogress()
    return measured
1991
1992
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) to feed addrawrevision for `rev`.

    Depending on `source`, the revision content is supplied either as a
    full text or as a cached delta against some base revision.
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # Fall back to p1 when there is no second parent.
        parent = p1 if p2 == nullid else p2
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # Pick whichever parent yields the shorter delta.
        parent, diff = p1, orig.revdiff(p1, rev)
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(diff) > len(p2diff):
                parent, diff = p2, p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        # Reuse the delta exactly as stored in the source revlog.
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2030
2031
2031 @contextlib.contextmanager
2032 @contextlib.contextmanager
2032 def _temprevlog(ui, orig, truncaterev):
2033 def _temprevlog(ui, orig, truncaterev):
2033 from mercurial import vfs as vfsmod
2034 from mercurial import vfs as vfsmod
2034
2035
2035 if orig._inline:
2036 if orig._inline:
2036 raise error.Abort('not supporting inline revlog (yet)')
2037 raise error.Abort('not supporting inline revlog (yet)')
2037
2038
2038 origindexpath = orig.opener.join(orig.indexfile)
2039 origindexpath = orig.opener.join(orig.indexfile)
2039 origdatapath = orig.opener.join(orig.datafile)
2040 origdatapath = orig.opener.join(orig.datafile)
2040 indexname = 'revlog.i'
2041 indexname = 'revlog.i'
2041 dataname = 'revlog.d'
2042 dataname = 'revlog.d'
2042
2043
2043 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2044 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2044 try:
2045 try:
2045 # copy the data file in a temporary directory
2046 # copy the data file in a temporary directory
2046 ui.debug('copying data in %s\n' % tmpdir)
2047 ui.debug('copying data in %s\n' % tmpdir)
2047 destindexpath = os.path.join(tmpdir, 'revlog.i')
2048 destindexpath = os.path.join(tmpdir, 'revlog.i')
2048 destdatapath = os.path.join(tmpdir, 'revlog.d')
2049 destdatapath = os.path.join(tmpdir, 'revlog.d')
2049 shutil.copyfile(origindexpath, destindexpath)
2050 shutil.copyfile(origindexpath, destindexpath)
2050 shutil.copyfile(origdatapath, destdatapath)
2051 shutil.copyfile(origdatapath, destdatapath)
2051
2052
2052 # remove the data we want to add again
2053 # remove the data we want to add again
2053 ui.debug('truncating data to be rewritten\n')
2054 ui.debug('truncating data to be rewritten\n')
2054 with open(destindexpath, 'ab') as index:
2055 with open(destindexpath, 'ab') as index:
2055 index.seek(0)
2056 index.seek(0)
2056 index.truncate(truncaterev * orig._io.size)
2057 index.truncate(truncaterev * orig._io.size)
2057 with open(destdatapath, 'ab') as data:
2058 with open(destdatapath, 'ab') as data:
2058 data.seek(0)
2059 data.seek(0)
2059 data.truncate(orig.start(truncaterev))
2060 data.truncate(orig.start(truncaterev))
2060
2061
2061 # instantiate a new revlog from the temporary copy
2062 # instantiate a new revlog from the temporary copy
2062 ui.debug('truncating adding to be rewritten\n')
2063 ui.debug('truncating adding to be rewritten\n')
2063 vfs = vfsmod.vfs(tmpdir)
2064 vfs = vfsmod.vfs(tmpdir)
2064 vfs.options = getattr(orig.opener, 'options', None)
2065 vfs.options = getattr(orig.opener, 'options', None)
2065
2066
2066 dest = revlog.revlog(vfs,
2067 dest = revlog.revlog(vfs,
2067 indexfile=indexname,
2068 indexfile=indexname,
2068 datafile=dataname)
2069 datafile=dataname)
2069 if dest._inline:
2070 if dest._inline:
2070 raise error.Abort('not supporting inline revlog (yet)')
2071 raise error.Abort('not supporting inline revlog (yet)')
2071 # make sure internals are initialized
2072 # make sure internals are initialized
2072 dest.revision(len(dest) - 1)
2073 dest.revision(len(dest) - 1)
2073 yield dest
2074 yield dest
2074 del dest, vfs
2075 del dest, vfs
2075 finally:
2076 finally:
2076 shutil.rmtree(tmpdir, True)
2077 shutil.rmtree(tmpdir, True)
2077
2078
2078 @command(b'perfrevlogchunks', revlogopts + formatteropts +
2079 @command(b'perfrevlogchunks', revlogopts + formatteropts +
2079 [(b'e', b'engines', b'', b'compression engines to use'),
2080 [(b'e', b'engines', b'', b'compression engines to use'),
2080 (b's', b'startrev', 0, b'revision to start at')],
2081 (b's', b'startrev', 0, b'revision to start at')],
2081 b'-c|-m|FILE')
2082 b'-c|-m|FILE')
2082 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2083 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2083 """Benchmark operations on revlog chunks.
2084 """Benchmark operations on revlog chunks.
2084
2085
2085 Logically, each revlog is a collection of fulltext revisions. However,
2086 Logically, each revlog is a collection of fulltext revisions. However,
2086 stored within each revlog are "chunks" of possibly compressed data. This
2087 stored within each revlog are "chunks" of possibly compressed data. This
2087 data needs to be read and decompressed or compressed and written.
2088 data needs to be read and decompressed or compressed and written.
2088
2089
2089 This command measures the time it takes to read+decompress and recompress
2090 This command measures the time it takes to read+decompress and recompress
2090 chunks in a revlog. It effectively isolates I/O and compression performance.
2091 chunks in a revlog. It effectively isolates I/O and compression performance.
2091 For measurements of higher-level operations like resolving revisions,
2092 For measurements of higher-level operations like resolving revisions,
2092 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2093 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2093 """
2094 """
2094 opts = _byteskwargs(opts)
2095 opts = _byteskwargs(opts)
2095
2096
2096 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2097 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2097
2098
2098 # _chunkraw was renamed to _getsegmentforrevs.
2099 # _chunkraw was renamed to _getsegmentforrevs.
2099 try:
2100 try:
2100 segmentforrevs = rl._getsegmentforrevs
2101 segmentforrevs = rl._getsegmentforrevs
2101 except AttributeError:
2102 except AttributeError:
2102 segmentforrevs = rl._chunkraw
2103 segmentforrevs = rl._chunkraw
2103
2104
2104 # Verify engines argument.
2105 # Verify engines argument.
2105 if engines:
2106 if engines:
2106 engines = set(e.strip() for e in engines.split(b','))
2107 engines = set(e.strip() for e in engines.split(b','))
2107 for engine in engines:
2108 for engine in engines:
2108 try:
2109 try:
2109 util.compressionengines[engine]
2110 util.compressionengines[engine]
2110 except KeyError:
2111 except KeyError:
2111 raise error.Abort(b'unknown compression engine: %s' % engine)
2112 raise error.Abort(b'unknown compression engine: %s' % engine)
2112 else:
2113 else:
2113 engines = []
2114 engines = []
2114 for e in util.compengines:
2115 for e in util.compengines:
2115 engine = util.compengines[e]
2116 engine = util.compengines[e]
2116 try:
2117 try:
2117 if engine.available():
2118 if engine.available():
2118 engine.revlogcompressor().compress(b'dummy')
2119 engine.revlogcompressor().compress(b'dummy')
2119 engines.append(e)
2120 engines.append(e)
2120 except NotImplementedError:
2121 except NotImplementedError:
2121 pass
2122 pass
2122
2123
2123 revs = list(rl.revs(startrev, len(rl) - 1))
2124 revs = list(rl.revs(startrev, len(rl) - 1))
2124
2125
2125 def rlfh(rl):
2126 def rlfh(rl):
2126 if rl._inline:
2127 if rl._inline:
2127 return getsvfs(repo)(rl.indexfile)
2128 return getsvfs(repo)(rl.indexfile)
2128 else:
2129 else:
2129 return getsvfs(repo)(rl.datafile)
2130 return getsvfs(repo)(rl.datafile)
2130
2131
2131 def doread():
2132 def doread():
2132 rl.clearcaches()
2133 rl.clearcaches()
2133 for rev in revs:
2134 for rev in revs:
2134 segmentforrevs(rev, rev)
2135 segmentforrevs(rev, rev)
2135
2136
2136 def doreadcachedfh():
2137 def doreadcachedfh():
2137 rl.clearcaches()
2138 rl.clearcaches()
2138 fh = rlfh(rl)
2139 fh = rlfh(rl)
2139 for rev in revs:
2140 for rev in revs:
2140 segmentforrevs(rev, rev, df=fh)
2141 segmentforrevs(rev, rev, df=fh)
2141
2142
2142 def doreadbatch():
2143 def doreadbatch():
2143 rl.clearcaches()
2144 rl.clearcaches()
2144 segmentforrevs(revs[0], revs[-1])
2145 segmentforrevs(revs[0], revs[-1])
2145
2146
2146 def doreadbatchcachedfh():
2147 def doreadbatchcachedfh():
2147 rl.clearcaches()
2148 rl.clearcaches()
2148 fh = rlfh(rl)
2149 fh = rlfh(rl)
2149 segmentforrevs(revs[0], revs[-1], df=fh)
2150 segmentforrevs(revs[0], revs[-1], df=fh)
2150
2151
2151 def dochunk():
2152 def dochunk():
2152 rl.clearcaches()
2153 rl.clearcaches()
2153 fh = rlfh(rl)
2154 fh = rlfh(rl)
2154 for rev in revs:
2155 for rev in revs:
2155 rl._chunk(rev, df=fh)
2156 rl._chunk(rev, df=fh)
2156
2157
2157 chunks = [None]
2158 chunks = [None]
2158
2159
2159 def dochunkbatch():
2160 def dochunkbatch():
2160 rl.clearcaches()
2161 rl.clearcaches()
2161 fh = rlfh(rl)
2162 fh = rlfh(rl)
2162 # Save chunks as a side-effect.
2163 # Save chunks as a side-effect.
2163 chunks[0] = rl._chunks(revs, df=fh)
2164 chunks[0] = rl._chunks(revs, df=fh)
2164
2165
2165 def docompress(compressor):
2166 def docompress(compressor):
2166 rl.clearcaches()
2167 rl.clearcaches()
2167
2168
2168 try:
2169 try:
2169 # Swap in the requested compression engine.
2170 # Swap in the requested compression engine.
2170 oldcompressor = rl._compressor
2171 oldcompressor = rl._compressor
2171 rl._compressor = compressor
2172 rl._compressor = compressor
2172 for chunk in chunks[0]:
2173 for chunk in chunks[0]:
2173 rl.compress(chunk)
2174 rl.compress(chunk)
2174 finally:
2175 finally:
2175 rl._compressor = oldcompressor
2176 rl._compressor = oldcompressor
2176
2177
2177 benches = [
2178 benches = [
2178 (lambda: doread(), b'read'),
2179 (lambda: doread(), b'read'),
2179 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2180 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2180 (lambda: doreadbatch(), b'read batch'),
2181 (lambda: doreadbatch(), b'read batch'),
2181 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2182 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2182 (lambda: dochunk(), b'chunk'),
2183 (lambda: dochunk(), b'chunk'),
2183 (lambda: dochunkbatch(), b'chunk batch'),
2184 (lambda: dochunkbatch(), b'chunk batch'),
2184 ]
2185 ]
2185
2186
2186 for engine in sorted(engines):
2187 for engine in sorted(engines):
2187 compressor = util.compengines[engine].revlogcompressor()
2188 compressor = util.compengines[engine].revlogcompressor()
2188 benches.append((functools.partial(docompress, compressor),
2189 benches.append((functools.partial(docompress, compressor),
2189 b'compress w/ %s' % engine))
2190 b'compress w/ %s' % engine))
2190
2191
2191 for fn, title in benches:
2192 for fn, title in benches:
2192 timer, fm = gettimer(ui, opts)
2193 timer, fm = gettimer(ui, opts)
2193 timer(fn, title=title)
2194 timer(fn, title=title)
2194 fm.end()
2195 fm.end()
2195
2196
2196 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2197 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2197 [(b'', b'cache', False, b'use caches instead of clearing')],
2198 [(b'', b'cache', False, b'use caches instead of clearing')],
2198 b'-c|-m|FILE REV')
2199 b'-c|-m|FILE REV')
2199 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2200 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2200 """Benchmark obtaining a revlog revision.
2201 """Benchmark obtaining a revlog revision.
2201
2202
2202 Obtaining a revlog revision consists of roughly the following steps:
2203 Obtaining a revlog revision consists of roughly the following steps:
2203
2204
2204 1. Compute the delta chain
2205 1. Compute the delta chain
2205 2. Slice the delta chain if applicable
2206 2. Slice the delta chain if applicable
2206 3. Obtain the raw chunks for that delta chain
2207 3. Obtain the raw chunks for that delta chain
2207 4. Decompress each raw chunk
2208 4. Decompress each raw chunk
2208 5. Apply binary patches to obtain fulltext
2209 5. Apply binary patches to obtain fulltext
2209 6. Verify hash of fulltext
2210 6. Verify hash of fulltext
2210
2211
2211 This command measures the time spent in each of these phases.
2212 This command measures the time spent in each of these phases.
2212 """
2213 """
2213 opts = _byteskwargs(opts)
2214 opts = _byteskwargs(opts)
2214
2215
2215 if opts.get(b'changelog') or opts.get(b'manifest'):
2216 if opts.get(b'changelog') or opts.get(b'manifest'):
2216 file_, rev = None, file_
2217 file_, rev = None, file_
2217 elif rev is None:
2218 elif rev is None:
2218 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2219 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2219
2220
2220 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2221 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2221
2222
2222 # _chunkraw was renamed to _getsegmentforrevs.
2223 # _chunkraw was renamed to _getsegmentforrevs.
2223 try:
2224 try:
2224 segmentforrevs = r._getsegmentforrevs
2225 segmentforrevs = r._getsegmentforrevs
2225 except AttributeError:
2226 except AttributeError:
2226 segmentforrevs = r._chunkraw
2227 segmentforrevs = r._chunkraw
2227
2228
2228 node = r.lookup(rev)
2229 node = r.lookup(rev)
2229 rev = r.rev(node)
2230 rev = r.rev(node)
2230
2231
2231 def getrawchunks(data, chain):
2232 def getrawchunks(data, chain):
2232 start = r.start
2233 start = r.start
2233 length = r.length
2234 length = r.length
2234 inline = r._inline
2235 inline = r._inline
2235 iosize = r._io.size
2236 iosize = r._io.size
2236 buffer = util.buffer
2237 buffer = util.buffer
2237
2238
2238 chunks = []
2239 chunks = []
2239 ladd = chunks.append
2240 ladd = chunks.append
2240 for idx, item in enumerate(chain):
2241 for idx, item in enumerate(chain):
2241 offset = start(item[0])
2242 offset = start(item[0])
2242 bits = data[idx]
2243 bits = data[idx]
2243 for rev in item:
2244 for rev in item:
2244 chunkstart = start(rev)
2245 chunkstart = start(rev)
2245 if inline:
2246 if inline:
2246 chunkstart += (rev + 1) * iosize
2247 chunkstart += (rev + 1) * iosize
2247 chunklength = length(rev)
2248 chunklength = length(rev)
2248 ladd(buffer(bits, chunkstart - offset, chunklength))
2249 ladd(buffer(bits, chunkstart - offset, chunklength))
2249
2250
2250 return chunks
2251 return chunks
2251
2252
2252 def dodeltachain(rev):
2253 def dodeltachain(rev):
2253 if not cache:
2254 if not cache:
2254 r.clearcaches()
2255 r.clearcaches()
2255 r._deltachain(rev)
2256 r._deltachain(rev)
2256
2257
2257 def doread(chain):
2258 def doread(chain):
2258 if not cache:
2259 if not cache:
2259 r.clearcaches()
2260 r.clearcaches()
2260 for item in slicedchain:
2261 for item in slicedchain:
2261 segmentforrevs(item[0], item[-1])
2262 segmentforrevs(item[0], item[-1])
2262
2263
2263 def doslice(r, chain, size):
2264 def doslice(r, chain, size):
2264 for s in slicechunk(r, chain, targetsize=size):
2265 for s in slicechunk(r, chain, targetsize=size):
2265 pass
2266 pass
2266
2267
2267 def dorawchunks(data, chain):
2268 def dorawchunks(data, chain):
2268 if not cache:
2269 if not cache:
2269 r.clearcaches()
2270 r.clearcaches()
2270 getrawchunks(data, chain)
2271 getrawchunks(data, chain)
2271
2272
2272 def dodecompress(chunks):
2273 def dodecompress(chunks):
2273 decomp = r.decompress
2274 decomp = r.decompress
2274 for chunk in chunks:
2275 for chunk in chunks:
2275 decomp(chunk)
2276 decomp(chunk)
2276
2277
2277 def dopatch(text, bins):
2278 def dopatch(text, bins):
2278 if not cache:
2279 if not cache:
2279 r.clearcaches()
2280 r.clearcaches()
2280 mdiff.patches(text, bins)
2281 mdiff.patches(text, bins)
2281
2282
2282 def dohash(text):
2283 def dohash(text):
2283 if not cache:
2284 if not cache:
2284 r.clearcaches()
2285 r.clearcaches()
2285 r.checkhash(text, node, rev=rev)
2286 r.checkhash(text, node, rev=rev)
2286
2287
2287 def dorevision():
2288 def dorevision():
2288 if not cache:
2289 if not cache:
2289 r.clearcaches()
2290 r.clearcaches()
2290 r.revision(node)
2291 r.revision(node)
2291
2292
2292 try:
2293 try:
2293 from mercurial.revlogutils.deltas import slicechunk
2294 from mercurial.revlogutils.deltas import slicechunk
2294 except ImportError:
2295 except ImportError:
2295 slicechunk = getattr(revlog, '_slicechunk', None)
2296 slicechunk = getattr(revlog, '_slicechunk', None)
2296
2297
2297 size = r.length(rev)
2298 size = r.length(rev)
2298 chain = r._deltachain(rev)[0]
2299 chain = r._deltachain(rev)[0]
2299 if not getattr(r, '_withsparseread', False):
2300 if not getattr(r, '_withsparseread', False):
2300 slicedchain = (chain,)
2301 slicedchain = (chain,)
2301 else:
2302 else:
2302 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2303 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2303 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2304 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2304 rawchunks = getrawchunks(data, slicedchain)
2305 rawchunks = getrawchunks(data, slicedchain)
2305 bins = r._chunks(chain)
2306 bins = r._chunks(chain)
2306 text = bytes(bins[0])
2307 text = bytes(bins[0])
2307 bins = bins[1:]
2308 bins = bins[1:]
2308 text = mdiff.patches(text, bins)
2309 text = mdiff.patches(text, bins)
2309
2310
2310 benches = [
2311 benches = [
2311 (lambda: dorevision(), b'full'),
2312 (lambda: dorevision(), b'full'),
2312 (lambda: dodeltachain(rev), b'deltachain'),
2313 (lambda: dodeltachain(rev), b'deltachain'),
2313 (lambda: doread(chain), b'read'),
2314 (lambda: doread(chain), b'read'),
2314 ]
2315 ]
2315
2316
2316 if getattr(r, '_withsparseread', False):
2317 if getattr(r, '_withsparseread', False):
2317 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2318 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2318 benches.append(slicing)
2319 benches.append(slicing)
2319
2320
2320 benches.extend([
2321 benches.extend([
2321 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2322 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2322 (lambda: dodecompress(rawchunks), b'decompress'),
2323 (lambda: dodecompress(rawchunks), b'decompress'),
2323 (lambda: dopatch(text, bins), b'patch'),
2324 (lambda: dopatch(text, bins), b'patch'),
2324 (lambda: dohash(text), b'hash'),
2325 (lambda: dohash(text), b'hash'),
2325 ])
2326 ])
2326
2327
2327 timer, fm = gettimer(ui, opts)
2328 timer, fm = gettimer(ui, opts)
2328 for fn, title in benches:
2329 for fn, title in benches:
2329 timer(fn, title=title)
2330 timer(fn, title=title)
2330 fm.end()
2331 fm.end()
2331
2332
2332 @command(b'perfrevset',
2333 @command(b'perfrevset',
2333 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2334 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2334 (b'', b'contexts', False, b'obtain changectx for each revision')]
2335 (b'', b'contexts', False, b'obtain changectx for each revision')]
2335 + formatteropts, b"REVSET")
2336 + formatteropts, b"REVSET")
2336 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2337 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2337 """benchmark the execution time of a revset
2338 """benchmark the execution time of a revset
2338
2339
2339 Use the --clean option if need to evaluate the impact of build volatile
2340 Use the --clean option if need to evaluate the impact of build volatile
2340 revisions set cache on the revset execution. Volatile cache hold filtered
2341 revisions set cache on the revset execution. Volatile cache hold filtered
2341 and obsolete related cache."""
2342 and obsolete related cache."""
2342 opts = _byteskwargs(opts)
2343 opts = _byteskwargs(opts)
2343
2344
2344 timer, fm = gettimer(ui, opts)
2345 timer, fm = gettimer(ui, opts)
2345 def d():
2346 def d():
2346 if clear:
2347 if clear:
2347 repo.invalidatevolatilesets()
2348 repo.invalidatevolatilesets()
2348 if contexts:
2349 if contexts:
2349 for ctx in repo.set(expr): pass
2350 for ctx in repo.set(expr): pass
2350 else:
2351 else:
2351 for r in repo.revs(expr): pass
2352 for r in repo.revs(expr): pass
2352 timer(d)
2353 timer(d)
2353 fm.end()
2354 fm.end()
2354
2355
2355 @command(b'perfvolatilesets',
2356 @command(b'perfvolatilesets',
2356 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2357 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2357 ] + formatteropts)
2358 ] + formatteropts)
2358 def perfvolatilesets(ui, repo, *names, **opts):
2359 def perfvolatilesets(ui, repo, *names, **opts):
2359 """benchmark the computation of various volatile set
2360 """benchmark the computation of various volatile set
2360
2361
2361 Volatile set computes element related to filtering and obsolescence."""
2362 Volatile set computes element related to filtering and obsolescence."""
2362 opts = _byteskwargs(opts)
2363 opts = _byteskwargs(opts)
2363 timer, fm = gettimer(ui, opts)
2364 timer, fm = gettimer(ui, opts)
2364 repo = repo.unfiltered()
2365 repo = repo.unfiltered()
2365
2366
2366 def getobs(name):
2367 def getobs(name):
2367 def d():
2368 def d():
2368 repo.invalidatevolatilesets()
2369 repo.invalidatevolatilesets()
2369 if opts[b'clear_obsstore']:
2370 if opts[b'clear_obsstore']:
2370 clearfilecache(repo, b'obsstore')
2371 clearfilecache(repo, b'obsstore')
2371 obsolete.getrevs(repo, name)
2372 obsolete.getrevs(repo, name)
2372 return d
2373 return d
2373
2374
2374 allobs = sorted(obsolete.cachefuncs)
2375 allobs = sorted(obsolete.cachefuncs)
2375 if names:
2376 if names:
2376 allobs = [n for n in allobs if n in names]
2377 allobs = [n for n in allobs if n in names]
2377
2378
2378 for name in allobs:
2379 for name in allobs:
2379 timer(getobs(name), title=name)
2380 timer(getobs(name), title=name)
2380
2381
2381 def getfiltered(name):
2382 def getfiltered(name):
2382 def d():
2383 def d():
2383 repo.invalidatevolatilesets()
2384 repo.invalidatevolatilesets()
2384 if opts[b'clear_obsstore']:
2385 if opts[b'clear_obsstore']:
2385 clearfilecache(repo, b'obsstore')
2386 clearfilecache(repo, b'obsstore')
2386 repoview.filterrevs(repo, name)
2387 repoview.filterrevs(repo, name)
2387 return d
2388 return d
2388
2389
2389 allfilter = sorted(repoview.filtertable)
2390 allfilter = sorted(repoview.filtertable)
2390 if names:
2391 if names:
2391 allfilter = [n for n in allfilter if n in names]
2392 allfilter = [n for n in allfilter if n in names]
2392
2393
2393 for name in allfilter:
2394 for name in allfilter:
2394 timer(getfiltered(name), title=name)
2395 timer(getfiltered(name), title=name)
2395 fm.end()
2396 fm.end()
2396
2397
2397 @command(b'perfbranchmap',
2398 @command(b'perfbranchmap',
2398 [(b'f', b'full', False,
2399 [(b'f', b'full', False,
2399 b'Includes build time of subset'),
2400 b'Includes build time of subset'),
2400 (b'', b'clear-revbranch', False,
2401 (b'', b'clear-revbranch', False,
2401 b'purge the revbranch cache between computation'),
2402 b'purge the revbranch cache between computation'),
2402 ] + formatteropts)
2403 ] + formatteropts)
2403 def perfbranchmap(ui, repo, *filternames, **opts):
2404 def perfbranchmap(ui, repo, *filternames, **opts):
2404 """benchmark the update of a branchmap
2405 """benchmark the update of a branchmap
2405
2406
2406 This benchmarks the full repo.branchmap() call with read and write disabled
2407 This benchmarks the full repo.branchmap() call with read and write disabled
2407 """
2408 """
2408 opts = _byteskwargs(opts)
2409 opts = _byteskwargs(opts)
2409 full = opts.get(b"full", False)
2410 full = opts.get(b"full", False)
2410 clear_revbranch = opts.get(b"clear_revbranch", False)
2411 clear_revbranch = opts.get(b"clear_revbranch", False)
2411 timer, fm = gettimer(ui, opts)
2412 timer, fm = gettimer(ui, opts)
2412 def getbranchmap(filtername):
2413 def getbranchmap(filtername):
2413 """generate a benchmark function for the filtername"""
2414 """generate a benchmark function for the filtername"""
2414 if filtername is None:
2415 if filtername is None:
2415 view = repo
2416 view = repo
2416 else:
2417 else:
2417 view = repo.filtered(filtername)
2418 view = repo.filtered(filtername)
2418 if util.safehasattr(view._branchcaches, '_per_filter'):
2419 if util.safehasattr(view._branchcaches, '_per_filter'):
2419 filtered = view._branchcaches._per_filter
2420 filtered = view._branchcaches._per_filter
2420 else:
2421 else:
2421 # older versions
2422 # older versions
2422 filtered = view._branchcaches
2423 filtered = view._branchcaches
2423 def d():
2424 def d():
2424 if clear_revbranch:
2425 if clear_revbranch:
2425 repo.revbranchcache()._clear()
2426 repo.revbranchcache()._clear()
2426 if full:
2427 if full:
2427 view._branchcaches.clear()
2428 view._branchcaches.clear()
2428 else:
2429 else:
2429 filtered.pop(filtername, None)
2430 filtered.pop(filtername, None)
2430 view.branchmap()
2431 view.branchmap()
2431 return d
2432 return d
2432 # add filter in smaller subset to bigger subset
2433 # add filter in smaller subset to bigger subset
2433 possiblefilters = set(repoview.filtertable)
2434 possiblefilters = set(repoview.filtertable)
2434 if filternames:
2435 if filternames:
2435 possiblefilters &= set(filternames)
2436 possiblefilters &= set(filternames)
2436 subsettable = getbranchmapsubsettable()
2437 subsettable = getbranchmapsubsettable()
2437 allfilters = []
2438 allfilters = []
2438 while possiblefilters:
2439 while possiblefilters:
2439 for name in possiblefilters:
2440 for name in possiblefilters:
2440 subset = subsettable.get(name)
2441 subset = subsettable.get(name)
2441 if subset not in possiblefilters:
2442 if subset not in possiblefilters:
2442 break
2443 break
2443 else:
2444 else:
2444 assert False, b'subset cycle %s!' % possiblefilters
2445 assert False, b'subset cycle %s!' % possiblefilters
2445 allfilters.append(name)
2446 allfilters.append(name)
2446 possiblefilters.remove(name)
2447 possiblefilters.remove(name)
2447
2448
2448 # warm the cache
2449 # warm the cache
2449 if not full:
2450 if not full:
2450 for name in allfilters:
2451 for name in allfilters:
2451 repo.filtered(name).branchmap()
2452 repo.filtered(name).branchmap()
2452 if not filternames or b'unfiltered' in filternames:
2453 if not filternames or b'unfiltered' in filternames:
2453 # add unfiltered
2454 # add unfiltered
2454 allfilters.append(None)
2455 allfilters.append(None)
2455
2456
2456 if util.safehasattr(branchmap.branchcache, 'fromfile'):
2457 if util.safehasattr(branchmap.branchcache, 'fromfile'):
2457 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
2458 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
2458 branchcacheread.set(classmethod(lambda *args: None))
2459 branchcacheread.set(classmethod(lambda *args: None))
2459 else:
2460 else:
2460 # older versions
2461 # older versions
2461 branchcacheread = safeattrsetter(branchmap, b'read')
2462 branchcacheread = safeattrsetter(branchmap, b'read')
2462 branchcacheread.set(lambda *args: None)
2463 branchcacheread.set(lambda *args: None)
2463 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2464 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2464 branchcachewrite.set(lambda *args: None)
2465 branchcachewrite.set(lambda *args: None)
2465 try:
2466 try:
2466 for name in allfilters:
2467 for name in allfilters:
2467 printname = name
2468 printname = name
2468 if name is None:
2469 if name is None:
2469 printname = b'unfiltered'
2470 printname = b'unfiltered'
2470 timer(getbranchmap(name), title=str(printname))
2471 timer(getbranchmap(name), title=str(printname))
2471 finally:
2472 finally:
2472 branchcacheread.restore()
2473 branchcacheread.restore()
2473 branchcachewrite.restore()
2474 branchcachewrite.restore()
2474 fm.end()
2475 fm.end()
2475
2476
2476 @command(b'perfbranchmapupdate', [
2477 @command(b'perfbranchmapupdate', [
2477 (b'', b'base', [], b'subset of revision to start from'),
2478 (b'', b'base', [], b'subset of revision to start from'),
2478 (b'', b'target', [], b'subset of revision to end with'),
2479 (b'', b'target', [], b'subset of revision to end with'),
2479 (b'', b'clear-caches', False, b'clear cache between each runs')
2480 (b'', b'clear-caches', False, b'clear cache between each runs')
2480 ] + formatteropts)
2481 ] + formatteropts)
2481 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2482 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2482 """benchmark branchmap update from for <base> revs to <target> revs
2483 """benchmark branchmap update from for <base> revs to <target> revs
2483
2484
2484 If `--clear-caches` is passed, the following items will be reset before
2485 If `--clear-caches` is passed, the following items will be reset before
2485 each update:
2486 each update:
2486 * the changelog instance and associated indexes
2487 * the changelog instance and associated indexes
2487 * the rev-branch-cache instance
2488 * the rev-branch-cache instance
2488
2489
2489 Examples:
2490 Examples:
2490
2491
2491 # update for the one last revision
2492 # update for the one last revision
2492 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2493 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2493
2494
2494 $ update for change coming with a new branch
2495 $ update for change coming with a new branch
2495 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2496 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2496 """
2497 """
2497 from mercurial import branchmap
2498 from mercurial import branchmap
2498 from mercurial import repoview
2499 from mercurial import repoview
2499 opts = _byteskwargs(opts)
2500 opts = _byteskwargs(opts)
2500 timer, fm = gettimer(ui, opts)
2501 timer, fm = gettimer(ui, opts)
2501 clearcaches = opts[b'clear_caches']
2502 clearcaches = opts[b'clear_caches']
2502 unfi = repo.unfiltered()
2503 unfi = repo.unfiltered()
2503 x = [None] # used to pass data between closure
2504 x = [None] # used to pass data between closure
2504
2505
2505 # we use a `list` here to avoid possible side effect from smartset
2506 # we use a `list` here to avoid possible side effect from smartset
2506 baserevs = list(scmutil.revrange(repo, base))
2507 baserevs = list(scmutil.revrange(repo, base))
2507 targetrevs = list(scmutil.revrange(repo, target))
2508 targetrevs = list(scmutil.revrange(repo, target))
2508 if not baserevs:
2509 if not baserevs:
2509 raise error.Abort(b'no revisions selected for --base')
2510 raise error.Abort(b'no revisions selected for --base')
2510 if not targetrevs:
2511 if not targetrevs:
2511 raise error.Abort(b'no revisions selected for --target')
2512 raise error.Abort(b'no revisions selected for --target')
2512
2513
2513 # make sure the target branchmap also contains the one in the base
2514 # make sure the target branchmap also contains the one in the base
2514 targetrevs = list(set(baserevs) | set(targetrevs))
2515 targetrevs = list(set(baserevs) | set(targetrevs))
2515 targetrevs.sort()
2516 targetrevs.sort()
2516
2517
2517 cl = repo.changelog
2518 cl = repo.changelog
2518 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2519 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2519 allbaserevs.sort()
2520 allbaserevs.sort()
2520 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2521 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2521
2522
2522 newrevs = list(alltargetrevs.difference(allbaserevs))
2523 newrevs = list(alltargetrevs.difference(allbaserevs))
2523 newrevs.sort()
2524 newrevs.sort()
2524
2525
2525 allrevs = frozenset(unfi.changelog.revs())
2526 allrevs = frozenset(unfi.changelog.revs())
2526 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2527 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2527 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2528 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2528
2529
2529 def basefilter(repo, visibilityexceptions=None):
2530 def basefilter(repo, visibilityexceptions=None):
2530 return basefilterrevs
2531 return basefilterrevs
2531
2532
2532 def targetfilter(repo, visibilityexceptions=None):
2533 def targetfilter(repo, visibilityexceptions=None):
2533 return targetfilterrevs
2534 return targetfilterrevs
2534
2535
2535 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2536 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2536 ui.status(msg % (len(allbaserevs), len(newrevs)))
2537 ui.status(msg % (len(allbaserevs), len(newrevs)))
2537 if targetfilterrevs:
2538 if targetfilterrevs:
2538 msg = b'(%d revisions still filtered)\n'
2539 msg = b'(%d revisions still filtered)\n'
2539 ui.status(msg % len(targetfilterrevs))
2540 ui.status(msg % len(targetfilterrevs))
2540
2541
2541 try:
2542 try:
2542 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2543 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2543 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2544 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2544
2545
2545 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2546 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2546 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2547 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2547
2548
2548 # try to find an existing branchmap to reuse
2549 # try to find an existing branchmap to reuse
2549 subsettable = getbranchmapsubsettable()
2550 subsettable = getbranchmapsubsettable()
2550 candidatefilter = subsettable.get(None)
2551 candidatefilter = subsettable.get(None)
2551 while candidatefilter is not None:
2552 while candidatefilter is not None:
2552 candidatebm = repo.filtered(candidatefilter).branchmap()
2553 candidatebm = repo.filtered(candidatefilter).branchmap()
2553 if candidatebm.validfor(baserepo):
2554 if candidatebm.validfor(baserepo):
2554 filtered = repoview.filterrevs(repo, candidatefilter)
2555 filtered = repoview.filterrevs(repo, candidatefilter)
2555 missing = [r for r in allbaserevs if r in filtered]
2556 missing = [r for r in allbaserevs if r in filtered]
2556 base = candidatebm.copy()
2557 base = candidatebm.copy()
2557 base.update(baserepo, missing)
2558 base.update(baserepo, missing)
2558 break
2559 break
2559 candidatefilter = subsettable.get(candidatefilter)
2560 candidatefilter = subsettable.get(candidatefilter)
2560 else:
2561 else:
2561 # no suitable subset where found
2562 # no suitable subset where found
2562 base = branchmap.branchcache()
2563 base = branchmap.branchcache()
2563 base.update(baserepo, allbaserevs)
2564 base.update(baserepo, allbaserevs)
2564
2565
2565 def setup():
2566 def setup():
2566 x[0] = base.copy()
2567 x[0] = base.copy()
2567 if clearcaches:
2568 if clearcaches:
2568 unfi._revbranchcache = None
2569 unfi._revbranchcache = None
2569 clearchangelog(repo)
2570 clearchangelog(repo)
2570
2571
2571 def bench():
2572 def bench():
2572 x[0].update(targetrepo, newrevs)
2573 x[0].update(targetrepo, newrevs)
2573
2574
2574 timer(bench, setup=setup)
2575 timer(bench, setup=setup)
2575 fm.end()
2576 fm.end()
2576 finally:
2577 finally:
2577 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2578 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2578 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2579 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2579
2580
@command(b'perfbranchmapload', [
     (b'f', b'filter', b'', b'Specify repoview filter'),
     (b'', b'list', False, b'List branchmap filter caches'),
     (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap

    With --list, only enumerate the on-disk branchmap cache files and
    their sizes; nothing is timed in that mode.
    """
    # NB: 'filter' and 'list' shadow builtins, but the parameter names
    # are dictated by the command option names declared above.
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap() # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        fromfile(repo)
    timer(bench, setup=setup)
    fm.end()
2630
2631
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)

    def count():
        # parsing happens inside the obsstore constructor; len() is the
        # value reported by the timer
        return len(obsolete.obsstore(svfs))

    timer(count)
    fm.end()
2640
2641
@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
     (b'', b'mincost', 0, b'smallest cost of items in cache'),
     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
     (b'', b'size', 4, b'size of cache'),
     (b'', b'gets', 10000, b'number of key lookups'),
     (b'', b'sets', 10000, b'number of key sets'),
     (b'', b'mixed', 10000, b'number of mixed mode operations'),
     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    opts = _byteskwargs(opts)

    def doinit():
        # raw construction cost of the cache itself
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    # one random payload value per cache slot
    values = [random.randint(0, _maxint) for _ in _xrange(size)]

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = [random.choice(values) for _ in _xrange(gets)]

    def dogets():
        cache = util.lrucachedict(size)
        for v in values:
            cache[v] = v
        for key in getseq:
            value = cache[key]
            value # silence pyflakes warning

    def dogetscost():
        cache = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            cache.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = cache[key]
                value # silence pyflakes warning
            except KeyError:
                # the cost limit may have evicted this key
                pass

    # Set mode tests insertion speed with cache eviction.
    # (built with one loop so the randint/choice draw order is stable)
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        cache = util.lrucachedict(size)
        for v in setseq:
            cache.insert(v, v)

    def doinsertscost():
        cache = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            cache.insert(v, v, cost=costs[i])

    def dosets():
        cache = util.lrucachedict(size)
        for v in setseq:
            cache[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        op = 0 if r < mixedgetfreq else 1
        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        cache = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    cache[v]
                except KeyError:
                    pass
            else:
                cache[v] = v

    def domixedcost():
        cache = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    cache[v]
                except KeyError:
                    pass
            else:
                cache.insert(v, v, cost=cost)

    # cost-limited caches exercise a different code path, so they get
    # their own set of benchmarks
    if costlimit:
        benches = [
            (doinit, b'init'),
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ]
    else:
        benches = [
            (doinit, b'init'),
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed'),
        ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2771
2772
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def bench():
        # the ui.write attribute lookup stays inside the loop on
        # purpose: it is part of what this microbenchmark measures
        for _ in range(100000):
            ui.write(b'Testing write performance\n')

    timer(bench)
    fm.end()
2784
2785
def uisetup(ui):
    # for "historical portability":
    # cmdutil.openrevlog() without commands.debugrevlogopts identifies
    # Mercurial 1.9 (or a79fea6b3e77) through 3.7 (or 5606f7d0d063).
    # On anything else there is nothing to patch.
    if (not util.safehasattr(cmdutil, b'openrevlog')
            or util.safehasattr(commands, b'debugrevlogopts')):
        return

    # In that range, '--dir' passed to openrevlog() should cause an
    # explicit failure, because directory support has only been
    # available since 3.5 (or 49c583ca48c4).
    def openrevlog(orig, repo, cmd, file_, opts):
        if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
            raise error.Abort(b"This version doesn't support --dir option",
                              hint=b"use 3.5 or later")
        return orig(repo, cmd, file_, opts)

    extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2799
2800
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        # drive one complete progress bar from 0 to `total`
        with ui.makeprogress(topic, total=total) as progress:
            for _ in pycompat.xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now