##// END OF EJS Templates
perf: make sure to explicitly disable any profiler after the first iteration...
marmoute -
r42556:a09829e1 default
parent child Browse files
Show More
@@ -1,2901 +1,2904 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of second to wait before any group of runs (default: 1)
16 number of second to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of run to perform before starting measurement.
19 number of run to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 from __future__ import absolute_import
57 from __future__ import absolute_import
58 import contextlib
58 import contextlib
59 import functools
59 import functools
60 import gc
60 import gc
61 import os
61 import os
62 import random
62 import random
63 import shutil
63 import shutil
64 import struct
64 import struct
65 import sys
65 import sys
66 import tempfile
66 import tempfile
67 import threading
67 import threading
68 import time
68 import time
69 from mercurial import (
69 from mercurial import (
70 changegroup,
70 changegroup,
71 cmdutil,
71 cmdutil,
72 commands,
72 commands,
73 copies,
73 copies,
74 error,
74 error,
75 extensions,
75 extensions,
76 hg,
76 hg,
77 mdiff,
77 mdiff,
78 merge,
78 merge,
79 revlog,
79 revlog,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 dir(registrar) # forcibly load it
96 dir(registrar) # forcibly load it
97 except ImportError:
97 except ImportError:
98 registrar = None
98 registrar = None
99 try:
99 try:
100 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
100 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 except ImportError:
101 except ImportError:
102 pass
102 pass
103 try:
103 try:
104 from mercurial.utils import repoviewutil # since 5.0
104 from mercurial.utils import repoviewutil # since 5.0
105 except ImportError:
105 except ImportError:
106 repoviewutil = None
106 repoviewutil = None
107 try:
107 try:
108 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
108 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 except ImportError:
109 except ImportError:
110 pass
110 pass
111 try:
111 try:
112 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
112 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 except ImportError:
113 except ImportError:
114 pass
114 pass
115
115
116 try:
116 try:
117 from mercurial import profiling
117 from mercurial import profiling
118 except ImportError:
118 except ImportError:
119 profiling = None
119 profiling = None
120
120
def identity(a):
    """Return *a* unchanged.

    Used below as the fallback for pycompat conversion helpers
    (_byteskwargs, fsencode) when pycompat is unavailable.
    """
    return a
123
123
# Bind pycompat helpers when available (each annotated with the first
# Mercurial version providing it); otherwise fall back to Python-2-only
# stdlib equivalents.
try:
    from mercurial import pycompat
    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (ImportError, AttributeError):
    import inspect
    getargspec = inspect.getargspec
    _byteskwargs = identity
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange

# Locate a Queue class; the attribute moved between Mercurial versions.
try:
    # 4.7+
    queue = pycompat.queue.Queue
except (AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (AttributeError, ImportError):
        queue = util.queue
153
153
# Locate a log templater factory: prefer logcmdutil.maketemplater, fall
# back to the older cmdutil.makelogtemplater, else None when neither
# location exists.
try:
    from mercurial import logcmdutil
    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None
162
162
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # unique sentinel: distinguishes "missing" from any real value
def safehasattr(thing, attr):
    # _sysstr normalizes the attribute name for getattr (no-op on py2)
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)
170
170
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    # NOTE(review): os.name is a native str, so this bytes comparison can
    # only match on Python 2; on Python 3 Windows the perf_counter branch
    # above is taken anyway (perf_counter exists since 3.3) — confirm.
    util.timer = time.clock
else:
    util.timer = time.time
180
180
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
    (b'c', b'changelog', False, (b'open changelog')),
    (b'm', b'manifest', False, (b'open manifest')),
    (b'', b'dir', False, (b'open directory manifest')),
    ]))
200
200
cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a command spec such as b"cmd|alias1|alias2" into a name list."""
    return [alias for alias in cmd.split(b"|")]
208
208
# Pick a "@command" decorator implementation depending on what this
# Mercurial provides, newest mechanism first.
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            # emulate norepo support by appending to commands.norepo
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
236
236
# Declare the perf.* config items when the registrar mechanism exists.
# dynamicdefault is used because the effective defaults are supplied at
# the ui.config*/getint call sites in this file.
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'pre-run',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'profile-benchmark',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'run-limits',
               default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    # registrar/configitems do not exist in early Mercurial; safe to skip
    # since every read of these options passes an explicit default.
    pass
265
265
def getlen(ui):
    """Return a length function; under perf.stub every container counts as 1."""
    stubbed = ui.configbool(b"perf", b"stub", False)
    if not stubbed:
        return len
    return lambda x: 1
270
270
class noop(object):
    """Context manager that does nothing on entry or exit.

    Exceptions raised inside the managed block propagate unchanged
    (__exit__ returns a falsy value).
    """

    def __enter__(self):
        return None

    def __exit__(self, *exc_info):
        return None

# shared do-nothing instance, reused to avoid repeated allocation
NOOPCTX = noop()
279
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # parse "<seconds>-<runcount>" pairs; malformed entries are warned
    # about and skipped rather than aborting the benchmark
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n'
                     % item))
            continue
        try:
            time_limit = float(pycompat.sysstr(parts[0]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (pycompat.bytestr(e), item)))
            continue
        try:
            run_limit = int(pycompat.sysstr(parts[1]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (pycompat.bytestr(e), item)))
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # experimental config: perf.profile-benchmark
    # only armed when the profiling module imported successfully; _timer
    # disables the profiler again after the first measured iteration
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
                          prerun=prerun, profiler=profiler)
    return t, fm
377
379
def stub_timer(fm, func, setup=None, title=None):
    """Single-shot replacement for _timer used when perf.stub is set.

    Runs the optional *setup* callable, then *func* exactly once.  *fm*
    and *title* are accepted only for signature compatibility with
    _timer; no timing output is produced.
    """
    if setup is not None:
        setup()
    func()
382
384
@contextlib.contextmanager
def timeone():
    """Time the enclosed statements.

    Yields an (initially empty) list; on exit a single
    (wall, user, sys) triple is appended to it.
    """
    measurement = []
    times_before = os.times()
    clock_before = util.timer()
    yield measurement
    clock_after = util.timer()
    times_after = os.times()
    before, after = times_before, times_after
    measurement.append((clock_after - clock_before,
                        after[0] - before[0],
                        after[1] - before[1]))
393
395
394
396
# list of stop condition (elapsed time, minimal run count)
# checked in order by _timer: a benchmark stops once some entry's elapsed
# time has passed AND its run count has been reached
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
400
402
def _timer(fm, func, setup=None, title=None, displayall=False,
           limits=DEFAULTLIMITS, prerun=0, profiler=None):
    """Benchmark *func* and emit the results through formatter *fm*.

    *setup*, when provided, runs before every invocation of *func*
    (warm-up and measured runs alike).  *prerun* warm-up runs execute
    before measurement starts.  *limits* is a sequence of
    (elapsed-seconds, min-run-count) stop conditions; measurement ends
    once any of them is satisfied.  *profiler*, when not None, is a
    context manager that profiles only the first measured iteration; it
    is explicitly replaced with NOOPCTX afterwards so later iterations
    run unprofiled.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # warm-up runs: executed but never timed
    # fix: use the _xrange compat alias; bare xrange is a NameError on py3
    for i in _xrange(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # only profile the first iteration; disable the profiler afterwards
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)
432
435
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timing summary through formatter *fm*.

    *timings* is a list of (wall, user, sys) triples.  Only the best
    (smallest) run is shown unless *displayall* is set, in which case
    max, average and median are reported as well.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)
    def display(role, entry):
        # emit one timing line; non-"best" roles get a "<role>." field prefix
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')
    # sorting makes [0] the best (fastest wall time), [-1] the worst
    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
464
467
465 # utilities for historical portability
468 # utilities for historical portability
466
469
def getint(ui, section, name, default):
    """Read an integer config value, tolerating ancient Mercurial.

    ui.configint has only been available since 1.9 (or fa2b596db182),
    so parse the raw string value ourselves.  Raises error.ConfigError
    when the value is present but not an integer.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, raw))
478
481
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    # capture the current value so restore() can put it back later
    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        # handle exposing set() for a new value and restore() for the
        # captured original
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
508
511
509 # utilities to examine each internal API changes
512 # utilities to examine each internal API changes
510
513
def getbranchmapsubsettable():
    """Return the 'subsettable' mapping from whichever module defines it.

    Aborts when no module provides it (only possible on pre-2.5
    Mercurial).
    """
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
527
530
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability": repo.svfs has been available since
    # 2.3 (or 7034365089bf); older repos expose 'sopener' instead.
    svfs = getattr(repo, 'svfs', None)
    return svfs if svfs else getattr(repo, 'sopener')
538
541
def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability": repo.vfs has been available since
    # 2.3 (or 7034365089bf); older repos expose 'opener' instead.
    vfs = getattr(repo, 'vfs', None)
    return vfs if vfs else getattr(repo, 'opener')
549
552
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # setattr(repo, '_tagscache', None) would be the wrong way to
        # clear the cache here: existing code paths expect _tagscache to
        # stay a structured object, so drop the memoized value instead.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    tagssetter = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if tagssetter: # since 1.4 (or 5614a628d173)
        def clearcache():
            tagssetter.set(None)
        return clearcache

    tagscachesetter = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if tagscachesetter: # since 0.6 (or d7df759d0e97)
        def clearcache():
            tagscachesetter.set(None)
        return clearcache

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
578
581
579 # utilities to clear cache
582 # utilities to clear cache
580
583
def clearfilecache(obj, attrname):
    """Drop a @filecache'd attribute so it is recomputed on next access.

    Works on the unfiltered view when *obj* is a (possibly filtered) repo.
    """
    unfilter = getattr(obj, 'unfiltered', None)
    target = obj if unfilter is None else obj.unfiltered()
    if attrname in vars(target):
        delattr(target, attrname)
    # forget the file-stat entry too, so the cache is not revalidated
    target._filecache.pop(attrname, None)
588
591
def clearchangelog(repo):
    """Force the changelog to be re-read from disk on next access."""
    unfiltered = repo.unfiltered()
    if repo is not unfiltered:
        # filtered views memoize their own changelog; reset that cache too
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(unfiltered, 'changelog')
594
597
595 # perf commands
598 # perf commands
596
599
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the dirstate with the given file patterns"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})

    def d():
        # materialize the walk so the full traversal is what gets timed
        walked = repo.dirstate.walk(matcher, subrepos=[], unknown=True,
                                    ignored=False)
        return len(list(walked))

    timer(d)
    fm.end()
605
608
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file *f* at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    filectx = repo[b'.'][f]

    def d():
        return len(filectx.annotate(True))

    timer(d)
    fm.end()
613
616
@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    """benchmark computing the status of the working copy"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    lookunknown = opts[b'unknown']

    def d():
        return sum(map(len, repo.status(unknown=lookunknown)))

    timer(d)
    fm.end()
625
628
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the working copy"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Save the quiet flag *before* entering the try block: in the original
    # code the save happened inside the try, so a failure on that first
    # statement would make the finally clause raise NameError on
    # `oldquiet`, masking the real exception.
    oldquiet = repo.ui.quiet
    try:
        # quiet the UI so terminal output does not pollute the timings
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        # dry run: measure the scan without mutating the dirstate
        opts[b'dry_run'] = True
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            # since 5.0 (or 8843c9f80b3b) addremove takes a uipathfn
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn,
                                            opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
643
646
def clearcaches(cl):
    """Drop the lookup caches of changelog *cl*.

    Behaves somewhat consistently across internal API changes.
    """
    if util.safehasattr(cl, b'clearcaches'):
        # modern revlogs know how to reset their own caches
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        from mercurial.node import nullid, nullrev
        # older revlogs: rebuild the minimal node->rev mapping by hand
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
652
655
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def setup():
        # each timed run starts from a cold changelog cache
        clearcaches(cl)

    def run():
        len(cl.headrevs())

    timer(run, setup=setup)
    fm.end()
665
668
@command(b'perftags', formatteropts +
         [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
          ])
def perftags(ui, repo, **opts):
    """benchmark reading the tags of a repository"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        if clearrevlogs:
            # also pay the cost of re-reading changelog and manifest
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        cleartagscache()

    def run():
        return len(repo.tags())

    timer(run, setup=setup)
    fm.end()
684
687
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating over the ancestors of all heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        # exhaust the ancestor iterator; the iteration itself is the work
        for _rev in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()
695
698
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests against an ancestor lazy set"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        ancestors = repo.changelog.ancestors(heads)
        for rev in revs:
            # the membership test is the operation under measurement
            rev in ancestors

    timer(d)
    fm.end()
708
711
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    repos = [repo, None]
    # Convert option keys to bytes like every other perf command does;
    # without this, gettimer() (which reads b'...'-keyed options) silently
    # ignores all formatter/profiling options for this command.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # reconnect for every run so connection setup isn't amortized away
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
723
726
@command(b'perfbookmarks', formatteropts +
         [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
          ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        if clearrevlogs:
            clearchangelog(repo)
        # drop the cached bookmark store so each run re-parses from disk
        clearfilecache(repo, b'_bookmarks')

    def run():
        repo._bookmarks

    timer(run, setup=setup)
    fm.end()
742
745
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        # time opening the bundle plus running fn() over the unbundler
        def run():
            with open(bundlepath, b'rb') as fh:
                unbundler = exchange.readbundle(ui, fh, bundlepath)
                fn(unbundler)
        return run

    def makereadnbytes(size):
        # time draining the bundle stream in `size`-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                unbundler = exchange.readbundle(ui, fh, bundlepath)
                while unbundler.read(size):
                    pass
        return run

    def makestdioread(size):
        # raw file read baseline, bypassing the bundle layer entirely
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass
        return run

    # bundle1

    def deltaiter(unbundler):
        for _delta in unbundler.deltaiter():
            pass

    def iterchunks(unbundler):
        for _chunk in unbundler.getchunks():
            pass

    # bundle2

    def forwardchunks(unbundler):
        for _chunk in unbundler._forwardchunks():
            pass

    def iterparts(unbundler):
        for _part in unbundler.iterparts():
            pass

    def iterpartsseekable(unbundler):
        for _part in unbundler.iterparts(seekable=True):
            pass

    def seek(unbundler):
        for part in unbundler.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        # drain every bundle2 part in `size`-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                unbundler = exchange.readbundle(ui, fh, bundlepath)
                for part in unbundler.iterparts():
                    while part.read(size):
                        pass
        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # peek at the bundle once to pick the benchmarks matching its format
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for benchfn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(benchfn, title=title)
        fm.end()
860
863
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def produce():
        state, chunks = bundler._generatechangelog(cl, nodes)
        # exhaust the generator; producing the chunks is the work measured
        for _chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(produce)

    fm.end()
891
894
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark computing the directory set of the dirstate map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # prime the dirstate outside the timed loop

    def d():
        dirstate.hasdir(b'a')
        # drop the cached directory map so every run recomputes it
        del dirstate._map._dirs

    timer(d)
    fm.end()
903
906
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark loading the dirstate from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # warm everything up once before timing

    def d():
        # invalidate first so the membership test forces a fresh parse
        repo.dirstate.invalidate()
        b"a" in repo.dirstate

    timer(d)
    fm.end()
914
917
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark rebuilding the dirstate directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # make sure the dirstate itself is loaded

    def d():
        repo.dirstate.hasdir(b"a")
        # discard the cached directory map between runs
        del repo.dirstate._map._dirs

    timer(d)
    fm.end()
925
928
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark building the case-folding map for dirstate files"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # prime the dirstate before timing

    def d():
        dirstate._map.filefoldmap.get(b'a')
        # drop the cached fold map so each run rebuilds it
        del dirstate._map.filefoldmap

    timer(d)
    fm.end()
937
940
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark building the case-folding map for dirstate directories"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # prime the dirstate before timing

    def d():
        dirstate._map.dirfoldmap.get(b'a')
        # drop both the fold map and the directory map it is derived from
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs

    timer(d)
    fm.end()
950
953
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark writing the dirstate back to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds  # make sure the dirstate is loaded

    def d():
        ds._dirty = True  # force a write even though nothing changed
        ds.write(repo.currenttransaction())

    timer(d)
    fm.end()
962
965
@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    """benchmark computing the merge actions against REV"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()

    def compute():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)

    timer(compute)
    fm.end()
981
984
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def trace():
        copies.pathcopies(ctx1, ctx2)

    timer(trace)
    fm.end()
993
996
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
          ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cachedphases = repo._phasecache
    full = opts.get(b'full')

    def d():
        phasecache = cachedphases
        if full:
            # also account for re-reading the phaseroots file from disk
            clearfilecache(repo, b'_phasecache')
            phasecache = repo._phasecache
        phasecache.invalidate()
        phasecache.loadphaserevs(repo)

    timer(d)
    fm.end()
1012
1015
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as executor:
        remotephases = executor.callcommand(
            b'listkeys', {b'namespace': b'phases'}).result()
    del other

    publishing = remotephases.get(b'publishing', False)
    ui.status((b'publishing: yes\n') if publishing
              else (b'publishing: no\n'))

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        # count remote roots we know about that are not public
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)

    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)

    timer(d)
    fm.end()
1068
1071
@command(b'perfmanifest',[
    (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
    (b'', b'clear-disk', False, b'clear on-disk caches too'),
] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV is a changeset: resolve it to its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hex node given directly
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
1104
1107
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changelog entry (by node) from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()
    def d():
        repo.changelog.read(n)
        #repo.changelog._cache = None
    timer(d)
    fm.end()
1115
1118
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # drop both the dirstate content and the cached ignore matcher so
        # every run re-loads the ignore rules from scratch
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1132
1135
@command(b'perfindex', [
    (b'', b'rev', [], b'revision to be looked up (default tip)'),
    (b'', b'no-lookup', None, b'do not revision lookup post creation'),
] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    if opts[b'no_lookup']:
        if opts['rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)
    timer(d, setup=setup)
    fm.end()
1186
1189
@command(b'perfnodemap', [
    (b'', b'rev', [], b'revision to be looked up (default tip)'),
    (b'', b'clear-caches', True, b'clear revlog cache between calls'),
] + formatteropts)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts['clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort('use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]
    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        def setup():
            setnodeget()
    else:
        setnodeget()
        d() # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1245
1248
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup time of `hg version` run as a subprocess"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        if os.name != r'nt':
            # clear HGRCPATH so config loading does not skew the measurement
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
        else:
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
    timer(d)
    fm.end()
1259
1262
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nl = [repo.changelog.node(i) for i in _xrange(count)]
    def d():
        for n in nl:
            repo.changelog.parents(n)
    timer(d)
    fm.end()
1283
1286
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of a changectx"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    def d():
        len(repo[x].files())
    timer(d)
    fm.end()
1293
1296
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the file list straight from changelog data"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        # index 3 of the parsed changelog entry is the files list
        len(cl.read(x)[3])
    timer(d)
    fm.end()
1304
1307
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision identifier via repo.lookup"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()
1311
1314
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
         ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark applying a random (but seeded) series of linelog edits"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed keeps the generated edit series reproducible across runs
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1345
1348
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specs via scmutil.revrange"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()
1353
1356
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark looking up a node in a freshly-loaded changelog revlog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(n)
        # drop revlog caches so each run starts cold
        clearcaches(cl)
    timer(d)
    fm.end()
1367
1370
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running `hg log`, optionally following renames"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev=[]
    timer, fm = gettimer(ui, opts)
    # suppress log output; we only care about timing
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
                               copies=opts.get(b'rename')))
    ui.popbuffer()
    fm.end()
1381
1384
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            ctx.branch() # read changelog data (in addition to the index)
    timer(moonwalk)
    fm.end()
1396
1399
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render into a null device so output cost does not include the terminal
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1430
1433
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
         ])
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # copy tracing is only interesting across merges, so restrict the
    # "revisions of interest" to merge commits within the requested set
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
1506
1509
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor for the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
    fm.end()
1513
1516
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    def d():
        s.fncache._load()
    timer(d)
    fm.end()
1523
1526
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache inside a transaction"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    # back the file up so repeated writes do not corrupt the repo
    tr.addbackup(b'fncache')
    def d():
        # force a write even though nothing changed
        s.fncache._dirty = True
        s.fncache.write(tr)
    timer(d)
    tr.close()
    lock.release()
    fm.end()
1540
1543
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark store-path encoding of every fncache entry"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    s.fncache._load()
    def d():
        for p in s.fncache.entries:
            s.encode(p)
    timer(d)
    fm.end()
1552
1555
def _bdiffworker(q, blocks, xdiff, ready, done):
    """worker loop for threaded bdiff benchmarks

    Pulls text pairs off queue ``q`` and diffs them with the algorithm
    selected by the ``xdiff``/``blocks`` flags. A ``None`` item ends the
    current batch; the worker then waits on the ``ready`` condition until
    the next batch, and exits once the ``done`` event is set.
    """
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()
1568
1571
def _manifestrevision(repo, mnode):
    """Return the raw revision text for manifest node ``mnode``."""
    manifestlog = repo.manifestlog

    # recent manifestlog objects expose getstorage(); older ones only have
    # the private _revlog attribute
    if not util.safehasattr(manifestlog, b'getstorage'):
        storage = manifestlog._revlog
    else:
        storage = manifestlog.getstorage(b'')

    return storage.revision(mnode)
1578
1581
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # the revlog is selected by flag, so the positional arg is the rev
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # collect all (old text, new text) pairs up front so only the diffing
    # itself is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        # threaded mode: workers pull pairs from `q`; None is the
        # per-batch sentinel (one per worker), the `done` event later
        # terminates the workers for good (see _bdiffworker)
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            # wake the workers parked on `ready`, then wait for the batch
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # shut the pool down: raise the done flag, unblock each worker's
        # q.get() with a sentinel, then wake anyone waiting on `ready`
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1679
1682
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # revlog chosen by flag: the positional argument is the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    pairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # gather every (left, right) text pair up front so only the diffing
    # itself is timed
    startrev = r.rev(r.lookup(rev))
    stop = min(startrev + count, len(r) - 1)
    for currev in range(startrev, stop):
        if not opts[b'alldata']:
            # diff each revision against its delta parent
            deltaparent = r.deltaparent(currev)
            pairs.append((r.revision(deltaparent), r.revision(currev)))
            continue

        # Load revisions associated with changeset.
        ctx = repo[currev]
        mtext = _manifestrevision(repo, ctx.manifestnode())
        for pctx in ctx.parents():
            pairs.append((_manifestrevision(repo, pctx.manifestnode()),
                          mtext))

        # Load filelog revisions by iterating manifest delta.
        man = ctx.manifest()
        baseman = ctx.p1().manifest()
        for filename, change in baseman.diff(man).items():
            flog = repo.file(filename)
            oldtext = flog.revision(change[0][0] or -1)
            newtext = flog.revision(change[1][0] or -1)
            pairs.append((oldtext, newtext))

    def d():
        for left, right in pairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1745
1748
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # map each single-letter diff flag to the commands.diff() keyword it
    # enables
    flagnames = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # benchmark with no flags, each flag alone, and the w+B combination
    for flags in ('', 'w', 'b', 'B', 'wB'):
        diffkwargs = {flagnames[flag]: b'1' for flag in flags}

        def d():
            # swallow the diff output; we only care about the timing
            ui.pushbuffer()
            commands.diff(ui, repo, **diffkwargs)
            ui.popbuffer()

        flagbytes = flags.encode('ascii')
        title = b'diffopts: %s' % ((b'-' + flagbytes) if flagbytes
                                   else b'none')
        timer(d, title=title)
    fm.end()
1767
1770
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # only revlog format v1 is supported: the version number lives in the
    # low 16 bits of the first 4 bytes, the inline-data flag in bit 16
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    # sample nodes at fixed fractions of the index for lookup benchmarks
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        # cost of instantiating a revlog object from scratch
        revlog.revlog(opener, indexfile)

    def read():
        # raw I/O cost of reading the index file
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        # the index is reparsed on every call, so each run starts cold
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        # count > 1 repeats the lookups to expose caching effects
        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    # (benchmark callable, human-readable title) pairs, timed in order
    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1885
1888
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    numrevs = getlen(ui)(rl)

    # a negative --startrev counts back from the tip
    if startrev < 0:
        startrev += numrevs

    def readseries():
        rl.clearcaches()

        step = opts[b'dist']
        first = startrev
        last = numrevs

        if reverse:
            # walk from tip down to startrev instead
            first, last = last - 1, first - 1
            step = -step

        for pos in _xrange(first, last, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(pos))

    timer, fm = gettimer(ui, opts)
    timer(readseries)
    fm.end()
1927
1930
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          # help text was a copy-paste of --stoprev's; --count is the
          # number of benchmark passes
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
          ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
      (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revision numbers count back from the tip
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # message previously misspelled 'invalide'
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    # results[i] is (rev, [timing of that rev in each run]); all runs walk
    # the same revision sequence, which the assert double-checks
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # fixed: this row previously computed the 70th percentile
        # (resultcount * 70 // 100) while being labeled "50%"
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
2035
2038
2036 class _faketr(object):
2039 class _faketr(object):
2037 def add(s, x, y, z=None):
2040 def add(s, x, y, z=None):
2038 return None
2041 return None
2039
2042
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    # Replay revisions [startrev, stoprev] of revlog `orig` into a
    # temporary revlog, timing each addrawrevision() call individually.
    # `source` selects how the revision data is fed (full text, delta to a
    # parent, stored delta -- see _getrevisionseed). Returns a list of
    # (rev, timing) tuples.
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            def updateprogress(pos):
                progress.update(pos)
            def completeprogress():
                progress.complete()
        else:
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # start each insertion cold so runs are comparable
                dest.index.clearcaches()
                dest.clearcaches()
            # only the insertion itself is inside the timed region
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
2076
2079
2077 def _getrevisionseed(orig, rev, tr, source):
2080 def _getrevisionseed(orig, rev, tr, source):
2078 from mercurial.node import nullid
2081 from mercurial.node import nullid
2079
2082
2080 linkrev = orig.linkrev(rev)
2083 linkrev = orig.linkrev(rev)
2081 node = orig.node(rev)
2084 node = orig.node(rev)
2082 p1, p2 = orig.parents(node)
2085 p1, p2 = orig.parents(node)
2083 flags = orig.flags(rev)
2086 flags = orig.flags(rev)
2084 cachedelta = None
2087 cachedelta = None
2085 text = None
2088 text = None
2086
2089
2087 if source == b'full':
2090 if source == b'full':
2088 text = orig.revision(rev)
2091 text = orig.revision(rev)
2089 elif source == b'parent-1':
2092 elif source == b'parent-1':
2090 baserev = orig.rev(p1)
2093 baserev = orig.rev(p1)
2091 cachedelta = (baserev, orig.revdiff(p1, rev))
2094 cachedelta = (baserev, orig.revdiff(p1, rev))
2092 elif source == b'parent-2':
2095 elif source == b'parent-2':
2093 parent = p2
2096 parent = p2
2094 if p2 == nullid:
2097 if p2 == nullid:
2095 parent = p1
2098 parent = p1
2096 baserev = orig.rev(parent)
2099 baserev = orig.rev(parent)
2097 cachedelta = (baserev, orig.revdiff(parent, rev))
2100 cachedelta = (baserev, orig.revdiff(parent, rev))
2098 elif source == b'parent-smallest':
2101 elif source == b'parent-smallest':
2099 p1diff = orig.revdiff(p1, rev)
2102 p1diff = orig.revdiff(p1, rev)
2100 parent = p1
2103 parent = p1
2101 diff = p1diff
2104 diff = p1diff
2102 if p2 != nullid:
2105 if p2 != nullid:
2103 p2diff = orig.revdiff(p2, rev)
2106 p2diff = orig.revdiff(p2, rev)
2104 if len(p1diff) > len(p2diff):
2107 if len(p1diff) > len(p2diff):
2105 parent = p2
2108 parent = p2
2106 diff = p2diff
2109 diff = p2diff
2107 baserev = orig.rev(parent)
2110 baserev = orig.rev(parent)
2108 cachedelta = (baserev, diff)
2111 cachedelta = (baserev, diff)
2109 elif source == b'storage':
2112 elif source == b'storage':
2110 baserev = orig.deltaparent(rev)
2113 baserev = orig.deltaparent(rev)
2111 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
2114 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
2112
2115
2113 return ((text, tr, linkrev, p1, p2),
2116 return ((text, tr, linkrev, p1, p2),
2114 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2117 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2115
2118
2116 @contextlib.contextmanager
2119 @contextlib.contextmanager
2117 def _temprevlog(ui, orig, truncaterev):
2120 def _temprevlog(ui, orig, truncaterev):
2118 from mercurial import vfs as vfsmod
2121 from mercurial import vfs as vfsmod
2119
2122
2120 if orig._inline:
2123 if orig._inline:
2121 raise error.Abort('not supporting inline revlog (yet)')
2124 raise error.Abort('not supporting inline revlog (yet)')
2122
2125
2123 origindexpath = orig.opener.join(orig.indexfile)
2126 origindexpath = orig.opener.join(orig.indexfile)
2124 origdatapath = orig.opener.join(orig.datafile)
2127 origdatapath = orig.opener.join(orig.datafile)
2125 indexname = 'revlog.i'
2128 indexname = 'revlog.i'
2126 dataname = 'revlog.d'
2129 dataname = 'revlog.d'
2127
2130
2128 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2131 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2129 try:
2132 try:
2130 # copy the data file in a temporary directory
2133 # copy the data file in a temporary directory
2131 ui.debug('copying data in %s\n' % tmpdir)
2134 ui.debug('copying data in %s\n' % tmpdir)
2132 destindexpath = os.path.join(tmpdir, 'revlog.i')
2135 destindexpath = os.path.join(tmpdir, 'revlog.i')
2133 destdatapath = os.path.join(tmpdir, 'revlog.d')
2136 destdatapath = os.path.join(tmpdir, 'revlog.d')
2134 shutil.copyfile(origindexpath, destindexpath)
2137 shutil.copyfile(origindexpath, destindexpath)
2135 shutil.copyfile(origdatapath, destdatapath)
2138 shutil.copyfile(origdatapath, destdatapath)
2136
2139
2137 # remove the data we want to add again
2140 # remove the data we want to add again
2138 ui.debug('truncating data to be rewritten\n')
2141 ui.debug('truncating data to be rewritten\n')
2139 with open(destindexpath, 'ab') as index:
2142 with open(destindexpath, 'ab') as index:
2140 index.seek(0)
2143 index.seek(0)
2141 index.truncate(truncaterev * orig._io.size)
2144 index.truncate(truncaterev * orig._io.size)
2142 with open(destdatapath, 'ab') as data:
2145 with open(destdatapath, 'ab') as data:
2143 data.seek(0)
2146 data.seek(0)
2144 data.truncate(orig.start(truncaterev))
2147 data.truncate(orig.start(truncaterev))
2145
2148
2146 # instantiate a new revlog from the temporary copy
2149 # instantiate a new revlog from the temporary copy
2147 ui.debug('truncating adding to be rewritten\n')
2150 ui.debug('truncating adding to be rewritten\n')
2148 vfs = vfsmod.vfs(tmpdir)
2151 vfs = vfsmod.vfs(tmpdir)
2149 vfs.options = getattr(orig.opener, 'options', None)
2152 vfs.options = getattr(orig.opener, 'options', None)
2150
2153
2151 dest = revlog.revlog(vfs,
2154 dest = revlog.revlog(vfs,
2152 indexfile=indexname,
2155 indexfile=indexname,
2153 datafile=dataname)
2156 datafile=dataname)
2154 if dest._inline:
2157 if dest._inline:
2155 raise error.Abort('not supporting inline revlog (yet)')
2158 raise error.Abort('not supporting inline revlog (yet)')
2156 # make sure internals are initialized
2159 # make sure internals are initialized
2157 dest.revision(len(dest) - 1)
2160 dest.revision(len(dest) - 1)
2158 yield dest
2161 yield dest
2159 del dest, vfs
2162 del dest, vfs
2160 finally:
2163 finally:
2161 shutil.rmtree(tmpdir, True)
2164 shutil.rmtree(tmpdir, True)
2162
2165
2163 @command(b'perfrevlogchunks', revlogopts + formatteropts +
2166 @command(b'perfrevlogchunks', revlogopts + formatteropts +
2164 [(b'e', b'engines', b'', b'compression engines to use'),
2167 [(b'e', b'engines', b'', b'compression engines to use'),
2165 (b's', b'startrev', 0, b'revision to start at')],
2168 (b's', b'startrev', 0, b'revision to start at')],
2166 b'-c|-m|FILE')
2169 b'-c|-m|FILE')
2167 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2170 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2168 """Benchmark operations on revlog chunks.
2171 """Benchmark operations on revlog chunks.
2169
2172
2170 Logically, each revlog is a collection of fulltext revisions. However,
2173 Logically, each revlog is a collection of fulltext revisions. However,
2171 stored within each revlog are "chunks" of possibly compressed data. This
2174 stored within each revlog are "chunks" of possibly compressed data. This
2172 data needs to be read and decompressed or compressed and written.
2175 data needs to be read and decompressed or compressed and written.
2173
2176
2174 This command measures the time it takes to read+decompress and recompress
2177 This command measures the time it takes to read+decompress and recompress
2175 chunks in a revlog. It effectively isolates I/O and compression performance.
2178 chunks in a revlog. It effectively isolates I/O and compression performance.
2176 For measurements of higher-level operations like resolving revisions,
2179 For measurements of higher-level operations like resolving revisions,
2177 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2180 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2178 """
2181 """
2179 opts = _byteskwargs(opts)
2182 opts = _byteskwargs(opts)
2180
2183
2181 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2184 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2182
2185
2183 # _chunkraw was renamed to _getsegmentforrevs.
2186 # _chunkraw was renamed to _getsegmentforrevs.
2184 try:
2187 try:
2185 segmentforrevs = rl._getsegmentforrevs
2188 segmentforrevs = rl._getsegmentforrevs
2186 except AttributeError:
2189 except AttributeError:
2187 segmentforrevs = rl._chunkraw
2190 segmentforrevs = rl._chunkraw
2188
2191
2189 # Verify engines argument.
2192 # Verify engines argument.
2190 if engines:
2193 if engines:
2191 engines = set(e.strip() for e in engines.split(b','))
2194 engines = set(e.strip() for e in engines.split(b','))
2192 for engine in engines:
2195 for engine in engines:
2193 try:
2196 try:
2194 util.compressionengines[engine]
2197 util.compressionengines[engine]
2195 except KeyError:
2198 except KeyError:
2196 raise error.Abort(b'unknown compression engine: %s' % engine)
2199 raise error.Abort(b'unknown compression engine: %s' % engine)
2197 else:
2200 else:
2198 engines = []
2201 engines = []
2199 for e in util.compengines:
2202 for e in util.compengines:
2200 engine = util.compengines[e]
2203 engine = util.compengines[e]
2201 try:
2204 try:
2202 if engine.available():
2205 if engine.available():
2203 engine.revlogcompressor().compress(b'dummy')
2206 engine.revlogcompressor().compress(b'dummy')
2204 engines.append(e)
2207 engines.append(e)
2205 except NotImplementedError:
2208 except NotImplementedError:
2206 pass
2209 pass
2207
2210
2208 revs = list(rl.revs(startrev, len(rl) - 1))
2211 revs = list(rl.revs(startrev, len(rl) - 1))
2209
2212
2210 def rlfh(rl):
2213 def rlfh(rl):
2211 if rl._inline:
2214 if rl._inline:
2212 return getsvfs(repo)(rl.indexfile)
2215 return getsvfs(repo)(rl.indexfile)
2213 else:
2216 else:
2214 return getsvfs(repo)(rl.datafile)
2217 return getsvfs(repo)(rl.datafile)
2215
2218
2216 def doread():
2219 def doread():
2217 rl.clearcaches()
2220 rl.clearcaches()
2218 for rev in revs:
2221 for rev in revs:
2219 segmentforrevs(rev, rev)
2222 segmentforrevs(rev, rev)
2220
2223
2221 def doreadcachedfh():
2224 def doreadcachedfh():
2222 rl.clearcaches()
2225 rl.clearcaches()
2223 fh = rlfh(rl)
2226 fh = rlfh(rl)
2224 for rev in revs:
2227 for rev in revs:
2225 segmentforrevs(rev, rev, df=fh)
2228 segmentforrevs(rev, rev, df=fh)
2226
2229
2227 def doreadbatch():
2230 def doreadbatch():
2228 rl.clearcaches()
2231 rl.clearcaches()
2229 segmentforrevs(revs[0], revs[-1])
2232 segmentforrevs(revs[0], revs[-1])
2230
2233
2231 def doreadbatchcachedfh():
2234 def doreadbatchcachedfh():
2232 rl.clearcaches()
2235 rl.clearcaches()
2233 fh = rlfh(rl)
2236 fh = rlfh(rl)
2234 segmentforrevs(revs[0], revs[-1], df=fh)
2237 segmentforrevs(revs[0], revs[-1], df=fh)
2235
2238
2236 def dochunk():
2239 def dochunk():
2237 rl.clearcaches()
2240 rl.clearcaches()
2238 fh = rlfh(rl)
2241 fh = rlfh(rl)
2239 for rev in revs:
2242 for rev in revs:
2240 rl._chunk(rev, df=fh)
2243 rl._chunk(rev, df=fh)
2241
2244
2242 chunks = [None]
2245 chunks = [None]
2243
2246
2244 def dochunkbatch():
2247 def dochunkbatch():
2245 rl.clearcaches()
2248 rl.clearcaches()
2246 fh = rlfh(rl)
2249 fh = rlfh(rl)
2247 # Save chunks as a side-effect.
2250 # Save chunks as a side-effect.
2248 chunks[0] = rl._chunks(revs, df=fh)
2251 chunks[0] = rl._chunks(revs, df=fh)
2249
2252
2250 def docompress(compressor):
2253 def docompress(compressor):
2251 rl.clearcaches()
2254 rl.clearcaches()
2252
2255
2253 try:
2256 try:
2254 # Swap in the requested compression engine.
2257 # Swap in the requested compression engine.
2255 oldcompressor = rl._compressor
2258 oldcompressor = rl._compressor
2256 rl._compressor = compressor
2259 rl._compressor = compressor
2257 for chunk in chunks[0]:
2260 for chunk in chunks[0]:
2258 rl.compress(chunk)
2261 rl.compress(chunk)
2259 finally:
2262 finally:
2260 rl._compressor = oldcompressor
2263 rl._compressor = oldcompressor
2261
2264
2262 benches = [
2265 benches = [
2263 (lambda: doread(), b'read'),
2266 (lambda: doread(), b'read'),
2264 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2267 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2265 (lambda: doreadbatch(), b'read batch'),
2268 (lambda: doreadbatch(), b'read batch'),
2266 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2269 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2267 (lambda: dochunk(), b'chunk'),
2270 (lambda: dochunk(), b'chunk'),
2268 (lambda: dochunkbatch(), b'chunk batch'),
2271 (lambda: dochunkbatch(), b'chunk batch'),
2269 ]
2272 ]
2270
2273
2271 for engine in sorted(engines):
2274 for engine in sorted(engines):
2272 compressor = util.compengines[engine].revlogcompressor()
2275 compressor = util.compengines[engine].revlogcompressor()
2273 benches.append((functools.partial(docompress, compressor),
2276 benches.append((functools.partial(docompress, compressor),
2274 b'compress w/ %s' % engine))
2277 b'compress w/ %s' % engine))
2275
2278
2276 for fn, title in benches:
2279 for fn, title in benches:
2277 timer, fm = gettimer(ui, opts)
2280 timer, fm = gettimer(ui, opts)
2278 timer(fn, title=title)
2281 timer(fn, title=title)
2279 fm.end()
2282 fm.end()
2280
2283
2281 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2284 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2282 [(b'', b'cache', False, b'use caches instead of clearing')],
2285 [(b'', b'cache', False, b'use caches instead of clearing')],
2283 b'-c|-m|FILE REV')
2286 b'-c|-m|FILE REV')
2284 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2287 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2285 """Benchmark obtaining a revlog revision.
2288 """Benchmark obtaining a revlog revision.
2286
2289
2287 Obtaining a revlog revision consists of roughly the following steps:
2290 Obtaining a revlog revision consists of roughly the following steps:
2288
2291
2289 1. Compute the delta chain
2292 1. Compute the delta chain
2290 2. Slice the delta chain if applicable
2293 2. Slice the delta chain if applicable
2291 3. Obtain the raw chunks for that delta chain
2294 3. Obtain the raw chunks for that delta chain
2292 4. Decompress each raw chunk
2295 4. Decompress each raw chunk
2293 5. Apply binary patches to obtain fulltext
2296 5. Apply binary patches to obtain fulltext
2294 6. Verify hash of fulltext
2297 6. Verify hash of fulltext
2295
2298
2296 This command measures the time spent in each of these phases.
2299 This command measures the time spent in each of these phases.
2297 """
2300 """
2298 opts = _byteskwargs(opts)
2301 opts = _byteskwargs(opts)
2299
2302
2300 if opts.get(b'changelog') or opts.get(b'manifest'):
2303 if opts.get(b'changelog') or opts.get(b'manifest'):
2301 file_, rev = None, file_
2304 file_, rev = None, file_
2302 elif rev is None:
2305 elif rev is None:
2303 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2306 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2304
2307
2305 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2308 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2306
2309
2307 # _chunkraw was renamed to _getsegmentforrevs.
2310 # _chunkraw was renamed to _getsegmentforrevs.
2308 try:
2311 try:
2309 segmentforrevs = r._getsegmentforrevs
2312 segmentforrevs = r._getsegmentforrevs
2310 except AttributeError:
2313 except AttributeError:
2311 segmentforrevs = r._chunkraw
2314 segmentforrevs = r._chunkraw
2312
2315
2313 node = r.lookup(rev)
2316 node = r.lookup(rev)
2314 rev = r.rev(node)
2317 rev = r.rev(node)
2315
2318
2316 def getrawchunks(data, chain):
2319 def getrawchunks(data, chain):
2317 start = r.start
2320 start = r.start
2318 length = r.length
2321 length = r.length
2319 inline = r._inline
2322 inline = r._inline
2320 iosize = r._io.size
2323 iosize = r._io.size
2321 buffer = util.buffer
2324 buffer = util.buffer
2322
2325
2323 chunks = []
2326 chunks = []
2324 ladd = chunks.append
2327 ladd = chunks.append
2325 for idx, item in enumerate(chain):
2328 for idx, item in enumerate(chain):
2326 offset = start(item[0])
2329 offset = start(item[0])
2327 bits = data[idx]
2330 bits = data[idx]
2328 for rev in item:
2331 for rev in item:
2329 chunkstart = start(rev)
2332 chunkstart = start(rev)
2330 if inline:
2333 if inline:
2331 chunkstart += (rev + 1) * iosize
2334 chunkstart += (rev + 1) * iosize
2332 chunklength = length(rev)
2335 chunklength = length(rev)
2333 ladd(buffer(bits, chunkstart - offset, chunklength))
2336 ladd(buffer(bits, chunkstart - offset, chunklength))
2334
2337
2335 return chunks
2338 return chunks
2336
2339
2337 def dodeltachain(rev):
2340 def dodeltachain(rev):
2338 if not cache:
2341 if not cache:
2339 r.clearcaches()
2342 r.clearcaches()
2340 r._deltachain(rev)
2343 r._deltachain(rev)
2341
2344
2342 def doread(chain):
2345 def doread(chain):
2343 if not cache:
2346 if not cache:
2344 r.clearcaches()
2347 r.clearcaches()
2345 for item in slicedchain:
2348 for item in slicedchain:
2346 segmentforrevs(item[0], item[-1])
2349 segmentforrevs(item[0], item[-1])
2347
2350
2348 def doslice(r, chain, size):
2351 def doslice(r, chain, size):
2349 for s in slicechunk(r, chain, targetsize=size):
2352 for s in slicechunk(r, chain, targetsize=size):
2350 pass
2353 pass
2351
2354
2352 def dorawchunks(data, chain):
2355 def dorawchunks(data, chain):
2353 if not cache:
2356 if not cache:
2354 r.clearcaches()
2357 r.clearcaches()
2355 getrawchunks(data, chain)
2358 getrawchunks(data, chain)
2356
2359
2357 def dodecompress(chunks):
2360 def dodecompress(chunks):
2358 decomp = r.decompress
2361 decomp = r.decompress
2359 for chunk in chunks:
2362 for chunk in chunks:
2360 decomp(chunk)
2363 decomp(chunk)
2361
2364
2362 def dopatch(text, bins):
2365 def dopatch(text, bins):
2363 if not cache:
2366 if not cache:
2364 r.clearcaches()
2367 r.clearcaches()
2365 mdiff.patches(text, bins)
2368 mdiff.patches(text, bins)
2366
2369
2367 def dohash(text):
2370 def dohash(text):
2368 if not cache:
2371 if not cache:
2369 r.clearcaches()
2372 r.clearcaches()
2370 r.checkhash(text, node, rev=rev)
2373 r.checkhash(text, node, rev=rev)
2371
2374
2372 def dorevision():
2375 def dorevision():
2373 if not cache:
2376 if not cache:
2374 r.clearcaches()
2377 r.clearcaches()
2375 r.revision(node)
2378 r.revision(node)
2376
2379
2377 try:
2380 try:
2378 from mercurial.revlogutils.deltas import slicechunk
2381 from mercurial.revlogutils.deltas import slicechunk
2379 except ImportError:
2382 except ImportError:
2380 slicechunk = getattr(revlog, '_slicechunk', None)
2383 slicechunk = getattr(revlog, '_slicechunk', None)
2381
2384
2382 size = r.length(rev)
2385 size = r.length(rev)
2383 chain = r._deltachain(rev)[0]
2386 chain = r._deltachain(rev)[0]
2384 if not getattr(r, '_withsparseread', False):
2387 if not getattr(r, '_withsparseread', False):
2385 slicedchain = (chain,)
2388 slicedchain = (chain,)
2386 else:
2389 else:
2387 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2390 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2388 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2391 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2389 rawchunks = getrawchunks(data, slicedchain)
2392 rawchunks = getrawchunks(data, slicedchain)
2390 bins = r._chunks(chain)
2393 bins = r._chunks(chain)
2391 text = bytes(bins[0])
2394 text = bytes(bins[0])
2392 bins = bins[1:]
2395 bins = bins[1:]
2393 text = mdiff.patches(text, bins)
2396 text = mdiff.patches(text, bins)
2394
2397
2395 benches = [
2398 benches = [
2396 (lambda: dorevision(), b'full'),
2399 (lambda: dorevision(), b'full'),
2397 (lambda: dodeltachain(rev), b'deltachain'),
2400 (lambda: dodeltachain(rev), b'deltachain'),
2398 (lambda: doread(chain), b'read'),
2401 (lambda: doread(chain), b'read'),
2399 ]
2402 ]
2400
2403
2401 if getattr(r, '_withsparseread', False):
2404 if getattr(r, '_withsparseread', False):
2402 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2405 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2403 benches.append(slicing)
2406 benches.append(slicing)
2404
2407
2405 benches.extend([
2408 benches.extend([
2406 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2409 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2407 (lambda: dodecompress(rawchunks), b'decompress'),
2410 (lambda: dodecompress(rawchunks), b'decompress'),
2408 (lambda: dopatch(text, bins), b'patch'),
2411 (lambda: dopatch(text, bins), b'patch'),
2409 (lambda: dohash(text), b'hash'),
2412 (lambda: dohash(text), b'hash'),
2410 ])
2413 ])
2411
2414
2412 timer, fm = gettimer(ui, opts)
2415 timer, fm = gettimer(ui, opts)
2413 for fn, title in benches:
2416 for fn, title in benches:
2414 timer(fn, title=title)
2417 timer(fn, title=title)
2415 fm.end()
2418 fm.end()
2416
2419
2417 @command(b'perfrevset',
2420 @command(b'perfrevset',
2418 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2421 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2419 (b'', b'contexts', False, b'obtain changectx for each revision')]
2422 (b'', b'contexts', False, b'obtain changectx for each revision')]
2420 + formatteropts, b"REVSET")
2423 + formatteropts, b"REVSET")
2421 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2424 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2422 """benchmark the execution time of a revset
2425 """benchmark the execution time of a revset
2423
2426
2424 Use the --clean option if need to evaluate the impact of build volatile
2427 Use the --clean option if need to evaluate the impact of build volatile
2425 revisions set cache on the revset execution. Volatile cache hold filtered
2428 revisions set cache on the revset execution. Volatile cache hold filtered
2426 and obsolete related cache."""
2429 and obsolete related cache."""
2427 opts = _byteskwargs(opts)
2430 opts = _byteskwargs(opts)
2428
2431
2429 timer, fm = gettimer(ui, opts)
2432 timer, fm = gettimer(ui, opts)
2430 def d():
2433 def d():
2431 if clear:
2434 if clear:
2432 repo.invalidatevolatilesets()
2435 repo.invalidatevolatilesets()
2433 if contexts:
2436 if contexts:
2434 for ctx in repo.set(expr): pass
2437 for ctx in repo.set(expr): pass
2435 else:
2438 else:
2436 for r in repo.revs(expr): pass
2439 for r in repo.revs(expr): pass
2437 timer(d)
2440 timer(d)
2438 fm.end()
2441 fm.end()
2439
2442
2440 @command(b'perfvolatilesets',
2443 @command(b'perfvolatilesets',
2441 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2444 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2442 ] + formatteropts)
2445 ] + formatteropts)
2443 def perfvolatilesets(ui, repo, *names, **opts):
2446 def perfvolatilesets(ui, repo, *names, **opts):
2444 """benchmark the computation of various volatile set
2447 """benchmark the computation of various volatile set
2445
2448
2446 Volatile set computes element related to filtering and obsolescence."""
2449 Volatile set computes element related to filtering and obsolescence."""
2447 opts = _byteskwargs(opts)
2450 opts = _byteskwargs(opts)
2448 timer, fm = gettimer(ui, opts)
2451 timer, fm = gettimer(ui, opts)
2449 repo = repo.unfiltered()
2452 repo = repo.unfiltered()
2450
2453
2451 def getobs(name):
2454 def getobs(name):
2452 def d():
2455 def d():
2453 repo.invalidatevolatilesets()
2456 repo.invalidatevolatilesets()
2454 if opts[b'clear_obsstore']:
2457 if opts[b'clear_obsstore']:
2455 clearfilecache(repo, b'obsstore')
2458 clearfilecache(repo, b'obsstore')
2456 obsolete.getrevs(repo, name)
2459 obsolete.getrevs(repo, name)
2457 return d
2460 return d
2458
2461
2459 allobs = sorted(obsolete.cachefuncs)
2462 allobs = sorted(obsolete.cachefuncs)
2460 if names:
2463 if names:
2461 allobs = [n for n in allobs if n in names]
2464 allobs = [n for n in allobs if n in names]
2462
2465
2463 for name in allobs:
2466 for name in allobs:
2464 timer(getobs(name), title=name)
2467 timer(getobs(name), title=name)
2465
2468
2466 def getfiltered(name):
2469 def getfiltered(name):
2467 def d():
2470 def d():
2468 repo.invalidatevolatilesets()
2471 repo.invalidatevolatilesets()
2469 if opts[b'clear_obsstore']:
2472 if opts[b'clear_obsstore']:
2470 clearfilecache(repo, b'obsstore')
2473 clearfilecache(repo, b'obsstore')
2471 repoview.filterrevs(repo, name)
2474 repoview.filterrevs(repo, name)
2472 return d
2475 return d
2473
2476
2474 allfilter = sorted(repoview.filtertable)
2477 allfilter = sorted(repoview.filtertable)
2475 if names:
2478 if names:
2476 allfilter = [n for n in allfilter if n in names]
2479 allfilter = [n for n in allfilter if n in names]
2477
2480
2478 for name in allfilter:
2481 for name in allfilter:
2479 timer(getfiltered(name), title=name)
2482 timer(getfiltered(name), title=name)
2480 fm.end()
2483 fm.end()
2481
2484
2482 @command(b'perfbranchmap',
2485 @command(b'perfbranchmap',
2483 [(b'f', b'full', False,
2486 [(b'f', b'full', False,
2484 b'Includes build time of subset'),
2487 b'Includes build time of subset'),
2485 (b'', b'clear-revbranch', False,
2488 (b'', b'clear-revbranch', False,
2486 b'purge the revbranch cache between computation'),
2489 b'purge the revbranch cache between computation'),
2487 ] + formatteropts)
2490 ] + formatteropts)
2488 def perfbranchmap(ui, repo, *filternames, **opts):
2491 def perfbranchmap(ui, repo, *filternames, **opts):
2489 """benchmark the update of a branchmap
2492 """benchmark the update of a branchmap
2490
2493
2491 This benchmarks the full repo.branchmap() call with read and write disabled
2494 This benchmarks the full repo.branchmap() call with read and write disabled
2492 """
2495 """
2493 opts = _byteskwargs(opts)
2496 opts = _byteskwargs(opts)
2494 full = opts.get(b"full", False)
2497 full = opts.get(b"full", False)
2495 clear_revbranch = opts.get(b"clear_revbranch", False)
2498 clear_revbranch = opts.get(b"clear_revbranch", False)
2496 timer, fm = gettimer(ui, opts)
2499 timer, fm = gettimer(ui, opts)
2497 def getbranchmap(filtername):
2500 def getbranchmap(filtername):
2498 """generate a benchmark function for the filtername"""
2501 """generate a benchmark function for the filtername"""
2499 if filtername is None:
2502 if filtername is None:
2500 view = repo
2503 view = repo
2501 else:
2504 else:
2502 view = repo.filtered(filtername)
2505 view = repo.filtered(filtername)
2503 if util.safehasattr(view._branchcaches, '_per_filter'):
2506 if util.safehasattr(view._branchcaches, '_per_filter'):
2504 filtered = view._branchcaches._per_filter
2507 filtered = view._branchcaches._per_filter
2505 else:
2508 else:
2506 # older versions
2509 # older versions
2507 filtered = view._branchcaches
2510 filtered = view._branchcaches
2508 def d():
2511 def d():
2509 if clear_revbranch:
2512 if clear_revbranch:
2510 repo.revbranchcache()._clear()
2513 repo.revbranchcache()._clear()
2511 if full:
2514 if full:
2512 view._branchcaches.clear()
2515 view._branchcaches.clear()
2513 else:
2516 else:
2514 filtered.pop(filtername, None)
2517 filtered.pop(filtername, None)
2515 view.branchmap()
2518 view.branchmap()
2516 return d
2519 return d
2517 # add filter in smaller subset to bigger subset
2520 # add filter in smaller subset to bigger subset
2518 possiblefilters = set(repoview.filtertable)
2521 possiblefilters = set(repoview.filtertable)
2519 if filternames:
2522 if filternames:
2520 possiblefilters &= set(filternames)
2523 possiblefilters &= set(filternames)
2521 subsettable = getbranchmapsubsettable()
2524 subsettable = getbranchmapsubsettable()
2522 allfilters = []
2525 allfilters = []
2523 while possiblefilters:
2526 while possiblefilters:
2524 for name in possiblefilters:
2527 for name in possiblefilters:
2525 subset = subsettable.get(name)
2528 subset = subsettable.get(name)
2526 if subset not in possiblefilters:
2529 if subset not in possiblefilters:
2527 break
2530 break
2528 else:
2531 else:
2529 assert False, b'subset cycle %s!' % possiblefilters
2532 assert False, b'subset cycle %s!' % possiblefilters
2530 allfilters.append(name)
2533 allfilters.append(name)
2531 possiblefilters.remove(name)
2534 possiblefilters.remove(name)
2532
2535
2533 # warm the cache
2536 # warm the cache
2534 if not full:
2537 if not full:
2535 for name in allfilters:
2538 for name in allfilters:
2536 repo.filtered(name).branchmap()
2539 repo.filtered(name).branchmap()
2537 if not filternames or b'unfiltered' in filternames:
2540 if not filternames or b'unfiltered' in filternames:
2538 # add unfiltered
2541 # add unfiltered
2539 allfilters.append(None)
2542 allfilters.append(None)
2540
2543
2541 if util.safehasattr(branchmap.branchcache, 'fromfile'):
2544 if util.safehasattr(branchmap.branchcache, 'fromfile'):
2542 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
2545 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
2543 branchcacheread.set(classmethod(lambda *args: None))
2546 branchcacheread.set(classmethod(lambda *args: None))
2544 else:
2547 else:
2545 # older versions
2548 # older versions
2546 branchcacheread = safeattrsetter(branchmap, b'read')
2549 branchcacheread = safeattrsetter(branchmap, b'read')
2547 branchcacheread.set(lambda *args: None)
2550 branchcacheread.set(lambda *args: None)
2548 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2551 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2549 branchcachewrite.set(lambda *args: None)
2552 branchcachewrite.set(lambda *args: None)
2550 try:
2553 try:
2551 for name in allfilters:
2554 for name in allfilters:
2552 printname = name
2555 printname = name
2553 if name is None:
2556 if name is None:
2554 printname = b'unfiltered'
2557 printname = b'unfiltered'
2555 timer(getbranchmap(name), title=str(printname))
2558 timer(getbranchmap(name), title=str(printname))
2556 finally:
2559 finally:
2557 branchcacheread.restore()
2560 branchcacheread.restore()
2558 branchcachewrite.restore()
2561 branchcachewrite.restore()
2559 fm.end()
2562 fm.end()
2560
2563
2561 @command(b'perfbranchmapupdate', [
2564 @command(b'perfbranchmapupdate', [
2562 (b'', b'base', [], b'subset of revision to start from'),
2565 (b'', b'base', [], b'subset of revision to start from'),
2563 (b'', b'target', [], b'subset of revision to end with'),
2566 (b'', b'target', [], b'subset of revision to end with'),
2564 (b'', b'clear-caches', False, b'clear cache between each runs')
2567 (b'', b'clear-caches', False, b'clear cache between each runs')
2565 ] + formatteropts)
2568 ] + formatteropts)
2566 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2569 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2567 """benchmark branchmap update from for <base> revs to <target> revs
2570 """benchmark branchmap update from for <base> revs to <target> revs
2568
2571
2569 If `--clear-caches` is passed, the following items will be reset before
2572 If `--clear-caches` is passed, the following items will be reset before
2570 each update:
2573 each update:
2571 * the changelog instance and associated indexes
2574 * the changelog instance and associated indexes
2572 * the rev-branch-cache instance
2575 * the rev-branch-cache instance
2573
2576
2574 Examples:
2577 Examples:
2575
2578
2576 # update for the one last revision
2579 # update for the one last revision
2577 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2580 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2578
2581
2579 $ update for change coming with a new branch
2582 $ update for change coming with a new branch
2580 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2583 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2581 """
2584 """
2582 from mercurial import branchmap
2585 from mercurial import branchmap
2583 from mercurial import repoview
2586 from mercurial import repoview
2584 opts = _byteskwargs(opts)
2587 opts = _byteskwargs(opts)
2585 timer, fm = gettimer(ui, opts)
2588 timer, fm = gettimer(ui, opts)
2586 clearcaches = opts[b'clear_caches']
2589 clearcaches = opts[b'clear_caches']
2587 unfi = repo.unfiltered()
2590 unfi = repo.unfiltered()
2588 x = [None] # used to pass data between closure
2591 x = [None] # used to pass data between closure
2589
2592
2590 # we use a `list` here to avoid possible side effect from smartset
2593 # we use a `list` here to avoid possible side effect from smartset
2591 baserevs = list(scmutil.revrange(repo, base))
2594 baserevs = list(scmutil.revrange(repo, base))
2592 targetrevs = list(scmutil.revrange(repo, target))
2595 targetrevs = list(scmutil.revrange(repo, target))
2593 if not baserevs:
2596 if not baserevs:
2594 raise error.Abort(b'no revisions selected for --base')
2597 raise error.Abort(b'no revisions selected for --base')
2595 if not targetrevs:
2598 if not targetrevs:
2596 raise error.Abort(b'no revisions selected for --target')
2599 raise error.Abort(b'no revisions selected for --target')
2597
2600
2598 # make sure the target branchmap also contains the one in the base
2601 # make sure the target branchmap also contains the one in the base
2599 targetrevs = list(set(baserevs) | set(targetrevs))
2602 targetrevs = list(set(baserevs) | set(targetrevs))
2600 targetrevs.sort()
2603 targetrevs.sort()
2601
2604
2602 cl = repo.changelog
2605 cl = repo.changelog
2603 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2606 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2604 allbaserevs.sort()
2607 allbaserevs.sort()
2605 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2608 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2606
2609
2607 newrevs = list(alltargetrevs.difference(allbaserevs))
2610 newrevs = list(alltargetrevs.difference(allbaserevs))
2608 newrevs.sort()
2611 newrevs.sort()
2609
2612
2610 allrevs = frozenset(unfi.changelog.revs())
2613 allrevs = frozenset(unfi.changelog.revs())
2611 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2614 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2612 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2615 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2613
2616
2614 def basefilter(repo, visibilityexceptions=None):
2617 def basefilter(repo, visibilityexceptions=None):
2615 return basefilterrevs
2618 return basefilterrevs
2616
2619
2617 def targetfilter(repo, visibilityexceptions=None):
2620 def targetfilter(repo, visibilityexceptions=None):
2618 return targetfilterrevs
2621 return targetfilterrevs
2619
2622
2620 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2623 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2621 ui.status(msg % (len(allbaserevs), len(newrevs)))
2624 ui.status(msg % (len(allbaserevs), len(newrevs)))
2622 if targetfilterrevs:
2625 if targetfilterrevs:
2623 msg = b'(%d revisions still filtered)\n'
2626 msg = b'(%d revisions still filtered)\n'
2624 ui.status(msg % len(targetfilterrevs))
2627 ui.status(msg % len(targetfilterrevs))
2625
2628
2626 try:
2629 try:
2627 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2630 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2628 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2631 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2629
2632
2630 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2633 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2631 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2634 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2632
2635
2633 # try to find an existing branchmap to reuse
2636 # try to find an existing branchmap to reuse
2634 subsettable = getbranchmapsubsettable()
2637 subsettable = getbranchmapsubsettable()
2635 candidatefilter = subsettable.get(None)
2638 candidatefilter = subsettable.get(None)
2636 while candidatefilter is not None:
2639 while candidatefilter is not None:
2637 candidatebm = repo.filtered(candidatefilter).branchmap()
2640 candidatebm = repo.filtered(candidatefilter).branchmap()
2638 if candidatebm.validfor(baserepo):
2641 if candidatebm.validfor(baserepo):
2639 filtered = repoview.filterrevs(repo, candidatefilter)
2642 filtered = repoview.filterrevs(repo, candidatefilter)
2640 missing = [r for r in allbaserevs if r in filtered]
2643 missing = [r for r in allbaserevs if r in filtered]
2641 base = candidatebm.copy()
2644 base = candidatebm.copy()
2642 base.update(baserepo, missing)
2645 base.update(baserepo, missing)
2643 break
2646 break
2644 candidatefilter = subsettable.get(candidatefilter)
2647 candidatefilter = subsettable.get(candidatefilter)
2645 else:
2648 else:
2646 # no suitable subset where found
2649 # no suitable subset where found
2647 base = branchmap.branchcache()
2650 base = branchmap.branchcache()
2648 base.update(baserepo, allbaserevs)
2651 base.update(baserepo, allbaserevs)
2649
2652
2650 def setup():
2653 def setup():
2651 x[0] = base.copy()
2654 x[0] = base.copy()
2652 if clearcaches:
2655 if clearcaches:
2653 unfi._revbranchcache = None
2656 unfi._revbranchcache = None
2654 clearchangelog(repo)
2657 clearchangelog(repo)
2655
2658
2656 def bench():
2659 def bench():
2657 x[0].update(targetrepo, newrevs)
2660 x[0].update(targetrepo, newrevs)
2658
2661
2659 timer(bench, setup=setup)
2662 timer(bench, setup=setup)
2660 fm.end()
2663 fm.end()
2661 finally:
2664 finally:
2662 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2665 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2663 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2666 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2664
2667
@command(b'perfbranchmapload', [
     (b'f', b'filter', b'', b'Specify repoview filter'),
     (b'', b'list', False, b'List branchmap filter caches'),
     (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),

    ] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # only list the on-disk branchmap caches and their sizes, no timing
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap() # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        fromfile(repo)
    timer(bench, setup=setup)
    fm.end()
2715
2718
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)
    # constructing the obsstore parses the on-disk obsolescence markers
    timer(lambda: len(obsolete.obsstore(svfs)))
    fm.end()
2725
2728
@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
     (b'', b'mincost', 0, b'smallest cost of items in cache'),
     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
     (b'', b'size', 4, b'size of cache'),
     (b'', b'gets', 10000, b'number of key lookups'),
     (b'', b'sets', 10000, b'number of key sets'),
     (b'', b'mixed', 10000, b'number of mixed mode operations'),
     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    """benchmark `util.lrucachedict` operations

    Times cache construction, pure lookups, insertions (with eviction) and a
    randomized mix of gets and sets.  When ``--costlimit`` is non-zero, the
    cost-aware insert/lookup variants are benchmarked instead.
    """
    opts = _byteskwargs(opts)

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2856
2859
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def write():
        # 100k small writes: measures per-call overhead of ui.write
        for i in range(100000):
            ui.write(b'Testing write performance\n')
    timer(write)
    fm.end()
2869
2872
def uisetup(ui):
    """extension setup: shim cmdutil.openrevlog on old Mercurial versions"""
    if (util.safehasattr(cmdutil, b'openrevlog') and
        not util.safehasattr(commands, b'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(b"This version doesn't support --dir option",
                                  hint=b"use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2884
2887
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        # one increment per unit of work, so the per-update cost dominates
        with ui.makeprogress(topic, total=total) as progress:
            for i in pycompat.xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now