##// END OF EJS Templates
perf: add a --from flag to perfmergecalculate...
marmoute -
r42573:e3ee707d default
parent child Browse files
Show More
@@ -1,2904 +1,2912
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of second to wait before any group of runs (default: 1)
16 number of second to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of run to perform before starting measurement.
19 number of run to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 from __future__ import absolute_import
57 from __future__ import absolute_import
58 import contextlib
58 import contextlib
59 import functools
59 import functools
60 import gc
60 import gc
61 import os
61 import os
62 import random
62 import random
63 import shutil
63 import shutil
64 import struct
64 import struct
65 import sys
65 import sys
66 import tempfile
66 import tempfile
67 import threading
67 import threading
68 import time
68 import time
69 from mercurial import (
69 from mercurial import (
70 changegroup,
70 changegroup,
71 cmdutil,
71 cmdutil,
72 commands,
72 commands,
73 copies,
73 copies,
74 error,
74 error,
75 extensions,
75 extensions,
76 hg,
76 hg,
77 mdiff,
77 mdiff,
78 merge,
78 merge,
79 revlog,
79 revlog,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 dir(registrar) # forcibly load it
96 dir(registrar) # forcibly load it
97 except ImportError:
97 except ImportError:
98 registrar = None
98 registrar = None
99 try:
99 try:
100 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
100 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 except ImportError:
101 except ImportError:
102 pass
102 pass
103 try:
103 try:
104 from mercurial.utils import repoviewutil # since 5.0
104 from mercurial.utils import repoviewutil # since 5.0
105 except ImportError:
105 except ImportError:
106 repoviewutil = None
106 repoviewutil = None
107 try:
107 try:
108 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
108 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 except ImportError:
109 except ImportError:
110 pass
110 pass
111 try:
111 try:
112 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
112 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 except ImportError:
113 except ImportError:
114 pass
114 pass
115
115
116 try:
116 try:
117 from mercurial import profiling
117 from mercurial import profiling
118 except ImportError:
118 except ImportError:
119 profiling = None
119 profiling = None
120
120
def identity(a):
    """Return the argument unchanged (no-op fallback converter)."""
    return a
123
123
124 try:
124 try:
125 from mercurial import pycompat
125 from mercurial import pycompat
126 getargspec = pycompat.getargspec # added to module after 4.5
126 getargspec = pycompat.getargspec # added to module after 4.5
127 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
127 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
128 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
128 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
129 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
129 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
130 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
130 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
131 if pycompat.ispy3:
131 if pycompat.ispy3:
132 _maxint = sys.maxsize # per py3 docs for replacing maxint
132 _maxint = sys.maxsize # per py3 docs for replacing maxint
133 else:
133 else:
134 _maxint = sys.maxint
134 _maxint = sys.maxint
135 except (ImportError, AttributeError):
135 except (ImportError, AttributeError):
136 import inspect
136 import inspect
137 getargspec = inspect.getargspec
137 getargspec = inspect.getargspec
138 _byteskwargs = identity
138 _byteskwargs = identity
139 fsencode = identity # no py3 support
139 fsencode = identity # no py3 support
140 _maxint = sys.maxint # no py3 support
140 _maxint = sys.maxint # no py3 support
141 _sysstr = lambda x: x # no py3 support
141 _sysstr = lambda x: x # no py3 support
142 _xrange = xrange
142 _xrange = xrange
143
143
144 try:
144 try:
145 # 4.7+
145 # 4.7+
146 queue = pycompat.queue.Queue
146 queue = pycompat.queue.Queue
147 except (AttributeError, ImportError):
147 except (AttributeError, ImportError):
148 # <4.7.
148 # <4.7.
149 try:
149 try:
150 queue = pycompat.queue
150 queue = pycompat.queue
151 except (AttributeError, ImportError):
151 except (AttributeError, ImportError):
152 queue = util.queue
152 queue = util.queue
153
153
154 try:
154 try:
155 from mercurial import logcmdutil
155 from mercurial import logcmdutil
156 makelogtemplater = logcmdutil.maketemplater
156 makelogtemplater = logcmdutil.maketemplater
157 except (AttributeError, ImportError):
157 except (AttributeError, ImportError):
158 try:
158 try:
159 makelogtemplater = cmdutil.makelogtemplater
159 makelogtemplater = cmdutil.makelogtemplater
160 except (AttributeError, ImportError):
160 except (AttributeError, ImportError):
161 makelogtemplater = None
161 makelogtemplater = None
162
162
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # unique sentinel that cannot collide with real values

def safehasattr(thing, attr):
    """hasattr() variant that accepts a bytes attribute name."""
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined

setattr(util, 'safehasattr', safehasattr)
170
170
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    # NOTE(review): on Python 3 os.name is a str, so this bytes comparison
    # never matches; harmless in practice because every Python 3 has
    # time.perf_counter and takes the branch above — confirm if py2/Windows
    # support still matters.
    util.timer = time.clock
else:
    util.timer = time.time
180
180
181 # for "historical portability":
181 # for "historical portability":
182 # use locally defined empty option list, if formatteropts isn't
182 # use locally defined empty option list, if formatteropts isn't
183 # available, because commands.formatteropts has been available since
183 # available, because commands.formatteropts has been available since
184 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
184 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
185 # available since 2.2 (or ae5f92e154d3)
185 # available since 2.2 (or ae5f92e154d3)
186 formatteropts = getattr(cmdutil, "formatteropts",
186 formatteropts = getattr(cmdutil, "formatteropts",
187 getattr(commands, "formatteropts", []))
187 getattr(commands, "formatteropts", []))
188
188
189 # for "historical portability":
189 # for "historical portability":
190 # use locally defined option list, if debugrevlogopts isn't available,
190 # use locally defined option list, if debugrevlogopts isn't available,
191 # because commands.debugrevlogopts has been available since 3.7 (or
191 # because commands.debugrevlogopts has been available since 3.7 (or
192 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
192 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
193 # since 1.9 (or a79fea6b3e77).
193 # since 1.9 (or a79fea6b3e77).
194 revlogopts = getattr(cmdutil, "debugrevlogopts",
194 revlogopts = getattr(cmdutil, "debugrevlogopts",
195 getattr(commands, "debugrevlogopts", [
195 getattr(commands, "debugrevlogopts", [
196 (b'c', b'changelog', False, (b'open changelog')),
196 (b'c', b'changelog', False, (b'open changelog')),
197 (b'm', b'manifest', False, (b'open manifest')),
197 (b'm', b'manifest', False, (b'open manifest')),
198 (b'', b'dir', False, (b'open directory manifest')),
198 (b'', b'dir', False, (b'open directory manifest')),
199 ]))
199 ]))
200
200
201 cmdtable = {}
201 cmdtable = {}
202
202
# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a b'name|alias1|alias2' command spec into a list of names."""
    return cmd.split(b"|")
208
208
# Pick the most modern @command decorator available, shimming older APIs.
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(fn):
            # register with or without a synopsis, matching cmdtable's shape
            if synopsis:
                cmdtable[name] = fn, list(options), synopsis
            else:
                cmdtable[name] = fn, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return fn
        return decorator
236
236
237 try:
237 try:
238 import mercurial.registrar
238 import mercurial.registrar
239 import mercurial.configitems
239 import mercurial.configitems
240 configtable = {}
240 configtable = {}
241 configitem = mercurial.registrar.configitem(configtable)
241 configitem = mercurial.registrar.configitem(configtable)
242 configitem(b'perf', b'presleep',
242 configitem(b'perf', b'presleep',
243 default=mercurial.configitems.dynamicdefault,
243 default=mercurial.configitems.dynamicdefault,
244 )
244 )
245 configitem(b'perf', b'stub',
245 configitem(b'perf', b'stub',
246 default=mercurial.configitems.dynamicdefault,
246 default=mercurial.configitems.dynamicdefault,
247 )
247 )
248 configitem(b'perf', b'parentscount',
248 configitem(b'perf', b'parentscount',
249 default=mercurial.configitems.dynamicdefault,
249 default=mercurial.configitems.dynamicdefault,
250 )
250 )
251 configitem(b'perf', b'all-timing',
251 configitem(b'perf', b'all-timing',
252 default=mercurial.configitems.dynamicdefault,
252 default=mercurial.configitems.dynamicdefault,
253 )
253 )
254 configitem(b'perf', b'pre-run',
254 configitem(b'perf', b'pre-run',
255 default=mercurial.configitems.dynamicdefault,
255 default=mercurial.configitems.dynamicdefault,
256 )
256 )
257 configitem(b'perf', b'profile-benchmark',
257 configitem(b'perf', b'profile-benchmark',
258 default=mercurial.configitems.dynamicdefault,
258 default=mercurial.configitems.dynamicdefault,
259 )
259 )
260 configitem(b'perf', b'run-limits',
260 configitem(b'perf', b'run-limits',
261 default=mercurial.configitems.dynamicdefault,
261 default=mercurial.configitems.dynamicdefault,
262 )
262 )
263 except (ImportError, AttributeError):
263 except (ImportError, AttributeError):
264 pass
264 pass
265
265
def getlen(ui):
    """Return a length function honoring the perf.stub config.

    When perf.stub is set, every collection reports a length of 1 so
    benchmarks complete quickly under the test suite."""
    stubbed = ui.configbool(b"perf", b"stub", False)
    return (lambda x: 1) if stubbed else len
270
270
class noop(object):
    """Context manager that does nothing on enter or exit."""
    def __enter__(self):
        return None
    def __exit__(self, *exc_info):
        return None

# shared do-nothing context manager instance
NOOPCTX = noop()
279
279
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry is b'<seconds>-<runcount>'; malformed entries are skipped
    # with a warning rather than aborting the benchmark
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for entry in limitspec:
        parts = entry.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n'
                     % entry))
            continue
        try:
            time_limit = float(pycompat.sysstr(parts[0]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (pycompat.bytestr(e), entry)))
            continue
        try:
            run_limit = int(pycompat.sysstr(parts[1]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (pycompat.bytestr(e), entry)))
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # optionally profile the first benchmarked iteration
    # experimental config: perf.profile-benchmark
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    # experimental config: perf.pre-run (warm-up iterations)
    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
                          prerun=prerun, profiler=profiler)
    return t, fm
379
379
def stub_timer(fm, func, setup=None, title=None):
    """Single-shot replacement for _timer, used when perf.stub is set.

    Runs setup (if given) and then func exactly once; reports nothing."""
    if setup is not None:
        setup()
    func()
384
384
@contextlib.contextmanager
def timeone():
    """Time the with-block; yield a list receiving one (wall, user, sys) tuple.

    The caller reads the measurement from the yielded list after the
    with-block exits."""
    record = []
    os_before = os.times()
    clock_before = util.timer()
    yield record
    clock_after = util.timer()
    os_after = os.times()
    record.append((clock_after - clock_before,
                   os_after[0] - os_before[0],
                   os_after[1] - os_before[1]))
395
395
396
396
# Default stop conditions, as (elapsed seconds, minimal run count) pairs;
# the first satisfied pair ends the benchmark loop.
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
402
402
def _timer(fm, func, setup=None, title=None, displayall=False,
           limits=DEFAULTLIMITS, prerun=0, profiler=None):
    """Repeatedly run 'func', timing each run, and report through 'fm'.

    'setup' (if given) runs before every iteration, untimed. 'prerun'
    warm-up iterations execute before measurement begins. Only the first
    measured iteration runs under 'profiler'. Iteration stops once any
    (elapsed, mincount) pair in 'limits' is satisfied."""
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # warm-up runs; their timings are discarded
    for _ in range(prerun):
        if setup is not None:
            setup()
        func()
    running = True
    while running:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as timing:
                res = func()
        # only the first measured iteration is profiled
        profiler = NOOPCTX
        count += 1
        results.append(timing[0])
        # Look for a stop condition.
        elapsed = util.timer() - begin
        for time_limit, min_runs in limits:
            if elapsed >= time_limit and count >= min_runs:
                running = False
                break

    formatone(fm, results, title=title, result=res,
              displayall=displayall)
435
435
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timings through formatter 'fm'.

    'timings' is a list of (wall, user, sys) tuples and is sorted in
    place. Only the best run is shown unless 'displayall' is set, which
    additionally prints max, average and median."""
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def emit(role, entry):
        # non-best rows get a 'role.' prefix on their field names
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    emit(b'best', timings[0])
    if displayall:
        emit(b'max', timings[-1])
        averages = tuple(sum(col) / count for col in zip(*timings))
        emit(b'avg', averages)
        emit(b'median', timings[len(timings) // 2])
467
467
468 # utilities for historical portability
468 # utilities for historical portability
469
469
def getint(ui, section, name, default):
    """Read config section.name as an int, with a portable fallback.

    for "historical portability": ui.configint has been available since
    1.9 (or fa2b596db182), so parse the raw value ourselves. Raises
    ConfigError when the value is set but not an integer."""
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, raw))
481
481
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure 'obj' has attribute 'name'; return a set/restore handle.

    Aborts when the attribute is missing at runtime, so a silently
    removed attribute cannot invalidate a performance measurement. With
    'ignoremissing' set, a missing attribute yields None instead, which
    is useful for attributes not present in all Mercurial versions.

    The returned handle exposes set(newvalue) to assign a new value and
    restore() to put the original value back."""
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    original = getattr(obj, _sysstr(name))

    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            # put the pre-existing value back
            setattr(obj, _sysstr(name), original)

    return attrutil()
511
511
512 # utilities to examine each internal API changes
512 # utilities to examine each internal API changes
513
513
def getbranchmapsubsettable():
    """Return the 'subsettable' mapping from whichever module hosts it.

    for "historical portability", subsettable has lived in:
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    - repoviewutil since 5.0
    """
    for mod in (branchmap, repoview, repoviewutil):
        table = getattr(mod, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
530
530
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        # older Mercurial exposed the store opener as "sopener"
        return getattr(repo, 'sopener')
541
541
def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        # older Mercurial exposed the working vfs as "opener"
        return getattr(repo, 'opener')
552
552
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
581
581
# utilities to clear cache

def clearfilecache(obj, attrname):
    """Drop the filecache entry ``attrname`` from ``obj``.

    Works on the unfiltered repo when ``obj`` is a (possibly filtered)
    repository, removing both the materialized attribute and the
    ``_filecache`` bookkeeping entry.
    """
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
591
591
def clearchangelog(repo):
    """Invalidate any in-memory changelog cache for ``repo``."""
    if repo is not repo.unfiltered():
        # bypass the filtered-repo proxy so the cache keys are really reset
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
597
597
# perf commands

@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the dirstate with the given patterns"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()
608
608
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file ``f`` at the working parent revision"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()
616
616
@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    """benchmark computing the working-directory status"""
    opts = _byteskwargs(opts)
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
628
628
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the whole repository"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        # scmutil.addremove grew a `uipathfn` argument in later versions;
        # detect it so the benchmark runs against both API shapes
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
646
646
def clearcaches(cl):
    """Clear a changelog's lookup caches across internal API versions."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # older revlog API: reset the node cache by hand
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
655
655
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def s():
        # start every run from cold lookup caches
        clearcaches(cl)
    def d():
        len(cl.headrevs())
    timer(d, setup=s)
    fm.end()
668
668
@command(b'perftags', formatteropts +
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perftags(ui, repo, **opts):
    """benchmark computing the repository's tags"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def s():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()
    def t():
        return len(repo.tags())
    timer(t, setup=s)
    fm.end()
687
687
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating all changelog ancestors of the current heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        for a in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()
698
698
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark ancestor-membership tests for the revisions in REVSET"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s
    timer(d)
    fm.end()
711
711
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # repos[1] is filled in by the setup step with a fresh peer each run
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        repos[1] = hg.peer(ui, opts, path)
    def d():
        setdiscovery.findcommonheads(ui, *repos)
    timer(d, setup=s)
    fm.end()
726
726
@command(b'perfbookmarks', formatteropts +
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def s():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')
    def d():
        repo._bookmarks
    timer(d, setup=s)
    fm.end()
745
745
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        # re-open and re-parse the bundle on every run so the benchmark
        # includes full I/O, not a cached object
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # raw file reads, bypassing bundle parsing entirely
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # peek at the bundle once to learn its type and pick the benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
863
863
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
894
894
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark building the dirstate's directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # force the dirstate to be loaded before timing
    b'a' in dirstate
    def d():
        dirstate.hasdir(b'a')
        # drop the cached directory map so each run rebuilds it
        del dirstate._map._dirs
    timer(d)
    fm.end()
906
906
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark loading the dirstate from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # force an initial load before timing
    b"a" in repo.dirstate
    def d():
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(d)
    fm.end()
917
917
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark rebuilding the dirstate directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # force the dirstate to be loaded before timing
    b"a" in repo.dirstate
    def d():
        repo.dirstate.hasdir(b"a")
        # drop the cached directory map so each run rebuilds it
        del repo.dirstate._map._dirs
    timer(d)
    fm.end()
928
928
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark rebuilding the dirstate file fold map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # force the dirstate to be loaded before timing
    b'a' in dirstate
    def d():
        dirstate._map.filefoldmap.get(b'a')
        # drop the cached fold map so each run rebuilds it
        del dirstate._map.filefoldmap
    timer(d)
    fm.end()
940
940
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark rebuilding the dirstate directory fold map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # force the dirstate to be loaded before timing
    b'a' in dirstate
    def d():
        dirstate._map.dirfoldmap.get(b'a')
        # drop both cached maps so each run rebuilds them
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(d)
    fm.end()
953
953
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark writing the dirstate back to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # force the dirstate to be loaded before timing
    b"a" in ds
    def d():
        # mark dirty so write() actually serializes on every run
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()
965
965
@command(b'perfmergecalculate',
         [
          (b'r', b'rev', b'.', b'rev to merge against'),
          (b'', b'from', b'', b'rev to merge from'),
         ] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    """benchmark the merge action calculation (no filesystem changes)

    With ``--from`` the merge is computed between two committed
    revisions instead of from the working directory.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # NOTE: after _byteskwargs all opts keys are bytes, so use b'from'
    # (consistent with every other opts access in this file)
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()
984
992
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def d():
        copies.pathcopies(ctx1, ctx2)
    timer(d)
    fm.end()
996
1004
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
          ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def d():
        phases = _phases
        if full:
            # also measure re-reading the phaseroots file from disk
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()
1015
1023
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
1071
1079
@command(b'perfmanifest',[
    (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
    (b'', b'clear-disk', False, b'clear on-disk caches too'),
] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset; benchmark the manifest it points at.
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full 40-char hex string: decode directly to a binary node
            t = bin(rev)
        else:
            # otherwise REV must be an integer manifest revision number
            try:
                rev = int(rev)

                # modern Mercurial exposes manifest storage via
                # getstorage(); fall back to the private _revlog attribute
                # on older versions
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        # clear in-memory (and optionally on-disk) caches so every run
        # measures a cold read
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
1107
1115
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    # Benchmark reading one full changelog entry by node.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def run():
        repo.changelog.read(node)
        #repo.changelog._cache = None

    timer(run)
    fm.end()
1118
1126
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def resetignore():
        # force the ignore matcher to be rebuilt on the next access
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def loadignore():
        dirstate._ignore

    timer(loadignore, setup=resetignore, title=b"load")
    fm.end()
1135
1143
@command(b'perfindex', [
    (b'', b'rev', [], b'revision to be looked up (default tip)'),
    (b'', b'no-lookup', None, b'do not revision lookup post creation'),
] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # bug fix: after _byteskwargs() all option keys are bytes, so the
        # previous str key ``opts['rev']`` raised KeyError on Python 3; the
        # Abort message must likewise be bytes
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        # timed section: build a fresh changelog and resolve every node
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)
    timer(d, setup=setup)
    fm.end()
1189
1197
@command(b'perfnodemap', [
    (b'', b'rev', [], b'revision to be looked up (default tip)'),
    (b'', b'clear-caches', True, b'clear revlog cache between calls'),
] + formatteropts)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # bug fix: after _byteskwargs() option keys are bytes; the previous str
    # key ``opts['clear_caches']`` raised KeyError on Python 3
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]
    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        # timed section: binary-node lookup for every requested revision
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        def setup():
            setnodeget()
    else:
        setnodeget()
        d() # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1248
1256
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    # Benchmark a bare `hg version -q` process start, discarding its output.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        if os.name == r'nt':
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
        else:
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))

    timer(run)
    fm.end()
1262
1270
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodelist = [repo.changelog.node(i) for i in _xrange(count)]

    def run():
        for node in nodelist:
            repo.changelog.parents(node)

    timer(run)
    fm.end()
1286
1294
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    # Benchmark computing the list of files touched by one changeset.
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)

    def run():
        len(repo[rev].files())

    timer(run)
    fm.end()
1296
1304
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    # Benchmark reading the raw file list straight out of the changelog entry.
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def run():
        len(cl.read(rev)[3])

    timer(run)
    fm.end()
1307
1315
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    # Benchmark resolving a user-supplied revision symbol to a binary node.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        return len(repo.lookup(rev))

    timer(run)
    fm.end()
1314
1322
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
          ], norepo=True)
def perflinelogedits(ui, **opts):
    # Benchmark applying a deterministic pseudo-random stream of hunk edits
    # to a fresh linelog.
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed so every invocation edits the same hunks; the randint call
    # order below must not change or the generated sequence changes too
    random.seed(0)
    randint = random.randint
    nlines = 0
    hunks = []
    for rev in _xrange(edits):
        a1 = randint(0, nlines)
        a2 = randint(a1, min(nlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        nlines += (b2 - b1) - (a2 - a1)
        hunks.append((rev, a1, a2, b1, b2))

    def run():
        ll = linelog.linelog()
        for hunk in hunks:
            ll.replacelines(*hunk)

    timer, fm = gettimer(ui, opts)
    timer(run)
    fm.end()
1348
1356
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    # Benchmark parsing and resolving a set of revision specs.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        return len(scmutil.revrange(repo, specs))

    timer(run)
    fm.end()
1356
1364
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    # Benchmark node->rev resolution against a standalone changelog revlog,
    # clearing its caches after each lookup so every run is cold.
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    node = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")

    def run():
        cl.rev(node)
        clearcaches(cl)

    timer(run)
    fm.end()
1370
1378
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    # Benchmark a full `hg log` run; output is buffered and thrown away.
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()

    def run():
        commands.log(ui, repo, rev=rev, date=b'', user=b'',
                     copies=opts.get(b'rename'))

    timer(run)
    ui.popbuffer()
    fm.end()
1384
1392
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def walkbackwards():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            repo[rev].branch() # read changelog data (in addition to the index)

    timer(walkbackwards)
    fm.end()
1399
1407
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render through a throw-away ui writing to os.devnull so output cost is
    # not measured; fix: close the devnull handle instead of leaking it
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    try:
        revs = opts.get(b'rev')
        if not revs:
            revs = [b'all()']
        revs = list(scmutil.revrange(repo, revs))

        defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                           b' {author|person}: {desc|firstline}\n')
        if testedtemplate is None:
            testedtemplate = defaulttemplate
        displayer = makelogtemplater(nullui, repo, testedtemplate)
        def format():
            # timed section: render every selected revision
            for r in revs:
                ctx = repo[r]
                displayer.show(ctx)
                displayer.flush(ctx)

        timer, fm = gettimer(ui, opts)
        timer(format)
        fm.end()
    finally:
        nullui.fout.close()
1433
1441
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
         ])
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    # NOTE(review): ``revs=[]`` is a mutable default; it is never mutated
    # here so this is harmless, but ``revs=None`` would be safer.
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    # NOTE(review): header/output templates and some dict keys below are str
    # while others are bytes — confirm this mix is intentional for the
    # supported Python versions.
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # only merge revisions are interesting: they have two parents to trace
    # copies between
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        # examine every (common-ancestor, parent) pair of the merge
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                # display shortened hashes per the formatter's hex function
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
1509
1517
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    # Benchmark constructing a case-collision auditor over the dirstate.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(run)
    fm.end()
1516
1524
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    # Benchmark loading the fncache file from the store.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    def run():
        store.fncache._load()

    timer(run)
    fm.end()
1526
1534
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    # Benchmark rewriting the fncache file inside a backed-up transaction.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        tr.addbackup(b'fncache')
        def d():
            # re-mark dirty each run, otherwise write() would be a no-op
            s.fncache._dirty = True
            s.fncache.write(tr)
        timer(d)
        tr.close()
    finally:
        # fix: previously the lock leaked if the benchmark raised; always
        # release the repo lock
        lock.release()
    fm.end()
1543
1551
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    # Benchmark path-encoding every entry currently held by the fncache.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def run():
        for path in store.fncache.entries:
            store.encode(path)

    timer(run)
    fm.end()
1555
1563
def _bdiffworker(q, blocks, xdiff, ready, done):
    """Thread body for perfbdiff: diff text pairs pulled from queue *q*.

    A ``None`` item marks the end of one benchmark pass; the worker then
    parks on *ready* until the main thread wakes it for the next pass (or
    sets *done* to shut it down).
    """
    # The flags never change for the lifetime of a worker, so resolve the
    # diff routine once instead of re-testing them for every pair.
    if xdiff:
        diff = mdiff.bdiff.xdiffblocks
    elif blocks:
        diff = mdiff.bdiff.blocks
    else:
        diff = mdiff.textdiff
    while not done.is_set():
        item = q.get()
        while item is not None:
            diff(*item)
            q.task_done()
            item = q.get()
        q.task_done()  # account for the None sentinel itself
        with ready:
            ready.wait()
1571
1579
def _manifestrevision(repo, mnode):
    """Return the stored text of manifest node *mnode*."""
    ml = repo.manifestlog

    # Modern manifestlog objects expose getstorage(); fall back to the
    # private _revlog attribute on older Mercurial versions.
    if util.safehasattr(ml, b'getstorage'):
        return ml.getstorage(b'').revision(mnode)
    return ml._revlog.revision(mnode)
1581
1589
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    rl = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # Collect the (old, new) text pairs up front so only the diffing itself
    # is timed.
    startrev = rl.rev(rl.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(rl) - 1)):
        if not opts[b'alldata']:
            dp = rl.deltaparent(rev)
            textpairs.append((rl.revision(dp), rl.revision(rev)))
            continue

        # Load revisions associated with changeset.
        ctx = repo[rev]
        mtext = _manifestrevision(repo, ctx.manifestnode())
        for pctx in ctx.parents():
            pman = _manifestrevision(repo, pctx.manifestnode())
            textpairs.append((pman, mtext))

        # Load filelog revisions by iterating manifest delta.
        man = ctx.manifest()
        pman = ctx.p1().manifest()
        for filename, change in pman.diff(man).items():
            fctx = repo.file(filename)
            f1 = fctx.revision(change[0][0] or -1)
            f2 = fctx.revision(change[1][0] or -1)
            textpairs.append((f1, f2))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()

        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)  # one end-of-pass sentinel per worker
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # Shut the worker threads down.
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1682
1690
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    rl = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # Gather the (old, new) text pairs up front; only diffing is timed.
    startrev = rl.rev(rl.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(rl) - 1)):
        if not opts[b'alldata']:
            dp = rl.deltaparent(rev)
            textpairs.append((rl.revision(dp), rl.revision(rev)))
            continue

        # Load revisions associated with changeset.
        ctx = repo[rev]
        mtext = _manifestrevision(repo, ctx.manifestnode())
        for pctx in ctx.parents():
            pman = _manifestrevision(repo, pctx.manifestnode())
            textpairs.append((pman, mtext))

        # Load filelog revisions by iterating manifest delta.
        man = ctx.manifest()
        pman = ctx.p1().manifest()
        for filename, change in pman.diff(man).items():
            fctx = repo.file(filename)
            f1 = fctx.revision(change[0][0] or -1)
            f2 = fctx.revision(change[1][0] or -1)
            textpairs.append((f1, f2))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1748
1756
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Short flag character -> commands.diff() keyword argument.
    flagnames = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        diffargs = dict((flagnames[c], b'1') for c in diffopt)

        def d():
            # Buffer the output so we time the diffing, not terminal I/O.
            ui.pushbuffer()
            commands.diff(ui, repo, **diffargs)
            ui.popbuffer()

        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()
1770
1778
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    indexdata = opener.read(indexfile)

    header = struct.unpack(b'>I', indexdata[0:4])[0]
    version = header & 0xFFFF
    if version != 1:
        raise error.Abort((b'unsupported revlog version: %d') % version)
    revlogio = revlog.revlogio()
    inline = header & (1 << 16)

    numrevs = len(rl)

    # Probe nodes at fixed fractions of the revlog length.
    node0 = rl.node(0)
    node25 = rl.node(numrevs // 4)
    node50 = rl.node(numrevs // 2)
    node75 = rl.node(numrevs // 4 * 3)
    node100 = rl.node(numrevs - 1)

    allrevs = range(numrevs)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(numrevs)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(indexdata, inline)

    def getentry(revornode):
        index = revlogio.parseindex(indexdata, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(indexdata, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(indexdata, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(indexdata, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1888
1896
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    if startrev < 0:
        # Negative start revisions count back from the end of the revlog.
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in _xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            rl.revision(rl.node(x))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1930
1938
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          # help text was a copy-paste of --stoprev's; --count is the number
          # of full benchmark passes, not a revision.
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
          ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
    (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    # results[i] is (rev, [timing-from-run-1, timing-from-run-2, ...])
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # BUG FIX: the 50th percentile was computed as `* 70 // 100`,
        # reporting the 70% value under the "50%" label.
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
2038
2046
2039 class _faketr(object):
2047 class _faketr(object):
2040 def add(s, x, y, z=None):
2048 def add(s, x, y, z=None):
2041 return None
2049 return None
2042
2050
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    """Replay revisions [startrev, stoprev] of ``orig`` into a fresh copy,
    timing each ``addrawrevision`` call.

    ``source`` selects how the revision payload is seeded (full text,
    delta against a parent, ...) — see _getrevisionseed. ``runidx`` is only
    used to label the progress bar when several runs are performed.

    Returns a list of ``(rev, elapsed-wall-time)`` pairs.
    """
    results = []
    faketr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            def updateprogress(pos):
                progress.update(pos)
            def completeprogress():
                progress.complete()
        else:
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for pos, rev in enumerate(revs):
            updateprogress(pos)
            addargs, addkwargs = _getrevisionseed(orig, rev, faketr, source)
            if clearcaches:
                # Drop index and revlog caches so every write starts cold.
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as timing:
                dest.addrawrevision(*addargs, **addkwargs)
            results.append((rev, timing[0]))
        updateprogress(total)
        completeprogress()
    return results
2079
2087
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) pair to feed ``addrawrevision`` for ``rev``.

    ``source`` picks the seeding strategy:

    - b'full':            provide the resolved fulltext, no cached delta
    - b'parent-1':        delta against the first parent
    - b'parent-2':        delta against the second parent (p1 if none)
    - b'parent-smallest': delta against whichever parent yields less data
    - b'storage':         delta against the stored delta parent
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # Fall back to p1 when there is no second parent.
        parent = p2
        if p2 == nullid:
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2118
2126
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a temporary copy of revlog ``orig`` truncated at ``truncaterev``.

    The index and data files are copied into a scratch directory, cut back so
    that revisions >= ``truncaterev`` are absent, and opened as a new revlog.
    The scratch directory is removed on exit. Inline revlogs are rejected.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as ifh:
            ifh.seek(0)
            # Each index entry has a fixed size; keep the first N entries.
            ifh.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as dfh:
            dfh.seek(0)
            dfh.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        # Best-effort removal (ignore_errors=True): never fail cleanup.
        shutil.rmtree(tmpdir, True)
2165
2173
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # No explicit list: benchmark every engine that is available and
        # actually able to compress (probe with a dummy payload).
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # File handle on the storage that holds the chunks.
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (doread, b'read'),
        (doreadcachedfh, b'read w/ reused fd'),
        (doreadbatch, b'read batch'),
        (doreadbatchcachedfh, b'read batch w/ reused fd'),
        (dochunk, b'chunk'),
        (dochunkbatch, b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2283
2291
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # With -c/-m the positional argument is the revision, not a file.
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # Bind hot attribute lookups to locals for the inner loop.
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # Inline revlogs interleave index entries with data.
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved between modules across Mercurial versions.
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        benches.append((lambda: doslice(r, chain, size),
                        b'slice-sparse-chain'))

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2419
2427
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clean option if need to evaluate the impact of build volatile
    revisions set cache on the revset execution. Volatile cache hold filtered
    and obsolete related cache."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def runrevset():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            # Also pay the cost of building a changectx per revision.
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass

    timer(runrevset)
    fm.end()
2442
2450
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def getobs(name):
        # Benchmark closure for one obsolescence-related set.
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)
        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        # Benchmark closure for one repoview filter.
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)
        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()
2484
2492
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
          ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                view._branchcaches.clear()
            else:
                filtered.pop(filtername, None)
            view.branchmap()
        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # Pick a filter whose subset is not itself still pending, so
        # smaller subsets are benchmarked (and warmed) first.
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # Disable on-disk branchmap reads and writes so only the in-memory
    # computation is measured.
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2563
2571
@command(b'perfbranchmapupdate', [
     (b'', b'base', [], b'subset of revision to start from'),
     (b'', b'target', [], b'subset of revision to end with'),
     (b'', b'clear-caches', False, b'clear cache between each runs')
    ] + formatteropts)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None] # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # revisions that the update step will have to add to the branchmap
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    # build the revision sets hidden by each of the two temporary filters
    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # register the two synthetic filters so repo.filtered() can use them
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset were found, build the base branchmap from
            # scratch
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # start each run from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        # always unregister the synthetic filters, even on error
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2667
2675
@command(b'perfbranchmapload', [
     (b'f', b'filter', b'', b'Specify repoview filter'),
     (b'', b'list', False, b'List branchmap filter caches'),
     (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),

    ] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # listing mode: show the on-disk branchmap cache files and sizes
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap() # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        # walk up the filter subset chain until a cached branchmap is found
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        fromfile(repo)
    timer(bench, setup=setup)
    fm.end()
2718
2726
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    storevfs = getsvfs(repo)

    def countmarkers():
        # instantiating the obsstore parses every on-disk marker
        return len(obsolete.obsstore(storevfs))

    timer(countmarkers)
    fm.end()
2728
2736
@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
     (b'', b'mincost', 0, b'smallest cost of items in cache'),
     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
     (b'', b'size', 4, b'size of cache'),
     (b'', b'gets', 10000, b'number of key lookups'),
     (b'', b'sets', 10000, b'number of key sets'),
     (b'', b'mixed', 10000, b'number of mixed mode operations'),
     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    """benchmark util.lrucachedict operations

    Runs one timed benchmark per operation mode (init, gets, inserts,
    sets, mixed).  When ``--costlimit`` is non-zero, the cost-aware
    variants of the benchmarks are run instead of the plain ones.

    All random sequences are generated once, up front, so every timed
    run replays the exact same operations.
    """
    opts = _byteskwargs(opts)

    def doinit():
        # construction cost only: build many empty caches
        for i in _xrange(10000):
            util.lrucachedict(size)

    # candidate per-item costs, sampled uniformly for cost-aware modes
    costrange = list(range(mincost, maxcost + 1))

    # the key/value population used to pre-fill the cache for get modes
    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        # NOTE: `costs` is the list built below for set mode; it is only
        # read when this closure runs, after that list exists
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                # with a cost limit the entry may have been evicted
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        # like doinserts but through __setitem__ instead of insert()
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        # op 0 is a lookup, op 1 is a store; ratio set by --mixedgetfreq
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        # keys span twice the cache size so lookups regularly miss
        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    # each mode gets its own timer/formatter so results are reported
    # separately
    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2859
2867
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def bench():
        # push a fixed number of identical lines through the ui layer
        msg = b'Testing write performance\n'
        for _ in range(100000):
            ui.write(msg)

    timer(bench)
    fm.end()
2872
2880
def uisetup(ui):
    """extension setup hook: patch cmdutil.openrevlog on old Mercurials

    Only acts when cmdutil.openrevlog exists but commands.debugrevlogopts
    does not, i.e. on the historical version range described below.
    """
    if (util.safehasattr(cmdutil, b'openrevlog') and
        not util.safehasattr(commands, b'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            # turn the silent lack of --dir support into an explicit abort
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(b"This version doesn't support --dir option",
                                  hint=b"use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2887
2895
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def bench():
        # advance a progress bar one step at a time up to `total`
        with ui.makeprogress(topic, total=total) as progress:
            step = 0
            while step < total:
                progress.increment()
                step += 1

    timer(bench)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now