perf: add a `pre-run` option...
marmoute
r42551:563cd9a7 default
@@ -1,2863 +1,2875 @@
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median, and average. If not set, only the best timing is reported
12 worst, median, and average. If not set, only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of seconds to wait before any group of runs (default: 1)
16 number of seconds to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
19 number of runs to perform before starting measurement (default: 0).
20
18 ``run-limits``
21 ``run-limits``
19 Control the number of runs each benchmark will perform. The option value
22 Control the number of runs each benchmark will perform. The option value
20 should be a list of `<time>-<numberofrun>` pairs. After each run the
23 should be a list of `<time>-<numberofrun>` pairs. After each run the
21 conditions are considered in order with the following logic:
24 conditions are considered in order with the following logic:
22
25
23 If the benchmark has been running for <time> seconds, and we have performed
26 If the benchmark has been running for <time> seconds, and we have performed
24 <numberofrun> iterations, stop the benchmark.
27 <numberofrun> iterations, stop the benchmark.
25
28
26 The default value is: `3.0-100, 10.0-3`
29 The default value is: `3.0-100, 10.0-3`
27
30
28 ``stub``
31 ``stub``
29 When set, benchmarks will only be run once, useful for testing
32 When set, benchmarks will only be run once, useful for testing
30 (default: off)
33 (default: off)
31 '''
34 '''
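As an illustration of how a `run-limits` value is interpreted, the standalone sketch below mirrors the parsing that `gettimer()` performs further down in this file; the helper name `parse_run_limits` and the example spec are assumptions made for the example only, not part of perf.py.

# Standalone sketch (hypothetical helper, not part of perf.py): turn a
# run-limits spec such as "3.0-100, 10.0-3" into (time_limit, run_limit)
# pairs, the same shape gettimer() builds below. Malformed entries are
# simply skipped here; the real code additionally warns about them.
def parse_run_limits(spec):
    limits = []
    for item in (part.strip() for part in spec.split(',')):
        pieces = item.split('-', 1)
        if len(pieces) < 2:
            continue  # missing "-": skip the entry
        try:
            limits.append((float(pieces[0]), int(pieces[1])))
        except ValueError:
            continue  # non-numeric component: skip the entry
    return limits

# With the default spec, a benchmark stops once it has either run for 3
# seconds and completed 100 iterations, or run for 10 seconds and
# completed 3 iterations.
assert parse_run_limits("3.0-100, 10.0-3") == [(3.0, 100), (10.0, 3)]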
32
35
33 # "historical portability" policy of perf.py:
36 # "historical portability" policy of perf.py:
34 #
37 #
35 # We have to do:
38 # We have to do:
36 # - make perf.py "loadable" with as wide Mercurial version as possible
39 # - make perf.py "loadable" with as wide Mercurial version as possible
37 # This doesn't mean that perf commands work correctly with that Mercurial.
40 # This doesn't mean that perf commands work correctly with that Mercurial.
38 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
41 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
39 # - make historical perf command work correctly with as wide Mercurial
42 # - make historical perf command work correctly with as wide Mercurial
40 # version as possible
43 # version as possible
41 #
44 #
42 # We have to do, if possible with reasonable cost:
45 # We have to do, if possible with reasonable cost:
43 # - make recent perf command for historical feature work correctly
46 # - make recent perf command for historical feature work correctly
44 # with early Mercurial
47 # with early Mercurial
45 #
48 #
46 # We don't have to do:
49 # We don't have to do:
47 # - make perf command for recent feature work correctly with early
50 # - make perf command for recent feature work correctly with early
48 # Mercurial
51 # Mercurial
49
52
50 from __future__ import absolute_import
53 from __future__ import absolute_import
51 import contextlib
54 import contextlib
52 import functools
55 import functools
53 import gc
56 import gc
54 import os
57 import os
55 import random
58 import random
56 import shutil
59 import shutil
57 import struct
60 import struct
58 import sys
61 import sys
59 import tempfile
62 import tempfile
60 import threading
63 import threading
61 import time
64 import time
62 from mercurial import (
65 from mercurial import (
63 changegroup,
66 changegroup,
64 cmdutil,
67 cmdutil,
65 commands,
68 commands,
66 copies,
69 copies,
67 error,
70 error,
68 extensions,
71 extensions,
69 hg,
72 hg,
70 mdiff,
73 mdiff,
71 merge,
74 merge,
72 revlog,
75 revlog,
73 util,
76 util,
74 )
77 )
75
78
76 # for "historical portability":
79 # for "historical portability":
77 # try to import modules separately (in dict order), and ignore
80 # try to import modules separately (in dict order), and ignore
78 # failure, because these aren't available with early Mercurial
81 # failure, because these aren't available with early Mercurial
79 try:
82 try:
80 from mercurial import branchmap # since 2.5 (or bcee63733aad)
83 from mercurial import branchmap # since 2.5 (or bcee63733aad)
81 except ImportError:
84 except ImportError:
82 pass
85 pass
83 try:
86 try:
84 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
87 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
85 except ImportError:
88 except ImportError:
86 pass
89 pass
87 try:
90 try:
88 from mercurial import registrar # since 3.7 (or 37d50250b696)
91 from mercurial import registrar # since 3.7 (or 37d50250b696)
89 dir(registrar) # forcibly load it
92 dir(registrar) # forcibly load it
90 except ImportError:
93 except ImportError:
91 registrar = None
94 registrar = None
92 try:
95 try:
93 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
96 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
94 except ImportError:
97 except ImportError:
95 pass
98 pass
96 try:
99 try:
97 from mercurial.utils import repoviewutil # since 5.0
100 from mercurial.utils import repoviewutil # since 5.0
98 except ImportError:
101 except ImportError:
99 repoviewutil = None
102 repoviewutil = None
100 try:
103 try:
101 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
104 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
102 except ImportError:
105 except ImportError:
103 pass
106 pass
104 try:
107 try:
105 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
108 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
106 except ImportError:
109 except ImportError:
107 pass
110 pass
108
111
109
112
110 def identity(a):
113 def identity(a):
111 return a
114 return a
112
115
113 try:
116 try:
114 from mercurial import pycompat
117 from mercurial import pycompat
115 getargspec = pycompat.getargspec # added to module after 4.5
118 getargspec = pycompat.getargspec # added to module after 4.5
116 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
119 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
117 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
120 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
118 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
121 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
119 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
122 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
120 if pycompat.ispy3:
123 if pycompat.ispy3:
121 _maxint = sys.maxsize # per py3 docs for replacing maxint
124 _maxint = sys.maxsize # per py3 docs for replacing maxint
122 else:
125 else:
123 _maxint = sys.maxint
126 _maxint = sys.maxint
124 except (ImportError, AttributeError):
127 except (ImportError, AttributeError):
125 import inspect
128 import inspect
126 getargspec = inspect.getargspec
129 getargspec = inspect.getargspec
127 _byteskwargs = identity
130 _byteskwargs = identity
128 fsencode = identity # no py3 support
131 fsencode = identity # no py3 support
129 _maxint = sys.maxint # no py3 support
132 _maxint = sys.maxint # no py3 support
130 _sysstr = lambda x: x # no py3 support
133 _sysstr = lambda x: x # no py3 support
131 _xrange = xrange
134 _xrange = xrange
132
135
133 try:
136 try:
134 # 4.7+
137 # 4.7+
135 queue = pycompat.queue.Queue
138 queue = pycompat.queue.Queue
136 except (AttributeError, ImportError):
139 except (AttributeError, ImportError):
137 # <4.7.
140 # <4.7.
138 try:
141 try:
139 queue = pycompat.queue
142 queue = pycompat.queue
140 except (AttributeError, ImportError):
143 except (AttributeError, ImportError):
141 queue = util.queue
144 queue = util.queue
142
145
143 try:
146 try:
144 from mercurial import logcmdutil
147 from mercurial import logcmdutil
145 makelogtemplater = logcmdutil.maketemplater
148 makelogtemplater = logcmdutil.maketemplater
146 except (AttributeError, ImportError):
149 except (AttributeError, ImportError):
147 try:
150 try:
148 makelogtemplater = cmdutil.makelogtemplater
151 makelogtemplater = cmdutil.makelogtemplater
149 except (AttributeError, ImportError):
152 except (AttributeError, ImportError):
150 makelogtemplater = None
153 makelogtemplater = None
151
154
152 # for "historical portability":
155 # for "historical portability":
153 # define util.safehasattr forcibly, because util.safehasattr has been
156 # define util.safehasattr forcibly, because util.safehasattr has been
154 # available since 1.9.3 (or 94b200a11cf7)
157 # available since 1.9.3 (or 94b200a11cf7)
155 _undefined = object()
158 _undefined = object()
156 def safehasattr(thing, attr):
159 def safehasattr(thing, attr):
157 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
160 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
158 setattr(util, 'safehasattr', safehasattr)
161 setattr(util, 'safehasattr', safehasattr)
159
162
160 # for "historical portability":
163 # for "historical portability":
161 # define util.timer forcibly, because util.timer has been available
164 # define util.timer forcibly, because util.timer has been available
162 # since ae5d60bb70c9
165 # since ae5d60bb70c9
163 if safehasattr(time, 'perf_counter'):
166 if safehasattr(time, 'perf_counter'):
164 util.timer = time.perf_counter
167 util.timer = time.perf_counter
165 elif os.name == b'nt':
168 elif os.name == b'nt':
166 util.timer = time.clock
169 util.timer = time.clock
167 else:
170 else:
168 util.timer = time.time
171 util.timer = time.time
169
172
170 # for "historical portability":
173 # for "historical portability":
171 # use locally defined empty option list, if formatteropts isn't
174 # use locally defined empty option list, if formatteropts isn't
172 # available, because commands.formatteropts has been available since
175 # available, because commands.formatteropts has been available since
173 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
176 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
174 # available since 2.2 (or ae5f92e154d3)
177 # available since 2.2 (or ae5f92e154d3)
175 formatteropts = getattr(cmdutil, "formatteropts",
178 formatteropts = getattr(cmdutil, "formatteropts",
176 getattr(commands, "formatteropts", []))
179 getattr(commands, "formatteropts", []))
177
180
178 # for "historical portability":
181 # for "historical portability":
179 # use locally defined option list, if debugrevlogopts isn't available,
182 # use locally defined option list, if debugrevlogopts isn't available,
180 # because commands.debugrevlogopts has been available since 3.7 (or
183 # because commands.debugrevlogopts has been available since 3.7 (or
181 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
184 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
182 # since 1.9 (or a79fea6b3e77).
185 # since 1.9 (or a79fea6b3e77).
183 revlogopts = getattr(cmdutil, "debugrevlogopts",
186 revlogopts = getattr(cmdutil, "debugrevlogopts",
184 getattr(commands, "debugrevlogopts", [
187 getattr(commands, "debugrevlogopts", [
185 (b'c', b'changelog', False, (b'open changelog')),
188 (b'c', b'changelog', False, (b'open changelog')),
186 (b'm', b'manifest', False, (b'open manifest')),
189 (b'm', b'manifest', False, (b'open manifest')),
187 (b'', b'dir', False, (b'open directory manifest')),
190 (b'', b'dir', False, (b'open directory manifest')),
188 ]))
191 ]))
189
192
190 cmdtable = {}
193 cmdtable = {}
191
194
192 # for "historical portability":
195 # for "historical portability":
193 # define parsealiases locally, because cmdutil.parsealiases has been
196 # define parsealiases locally, because cmdutil.parsealiases has been
194 # available since 1.5 (or 6252852b4332)
197 # available since 1.5 (or 6252852b4332)
195 def parsealiases(cmd):
198 def parsealiases(cmd):
196 return cmd.split(b"|")
199 return cmd.split(b"|")
197
200
198 if safehasattr(registrar, 'command'):
201 if safehasattr(registrar, 'command'):
199 command = registrar.command(cmdtable)
202 command = registrar.command(cmdtable)
200 elif safehasattr(cmdutil, 'command'):
203 elif safehasattr(cmdutil, 'command'):
201 command = cmdutil.command(cmdtable)
204 command = cmdutil.command(cmdtable)
202 if b'norepo' not in getargspec(command).args:
205 if b'norepo' not in getargspec(command).args:
203 # for "historical portability":
206 # for "historical portability":
204 # wrap original cmdutil.command, because "norepo" option has
207 # wrap original cmdutil.command, because "norepo" option has
205 # been available since 3.1 (or 75a96326cecb)
208 # been available since 3.1 (or 75a96326cecb)
206 _command = command
209 _command = command
207 def command(name, options=(), synopsis=None, norepo=False):
210 def command(name, options=(), synopsis=None, norepo=False):
208 if norepo:
211 if norepo:
209 commands.norepo += b' %s' % b' '.join(parsealiases(name))
212 commands.norepo += b' %s' % b' '.join(parsealiases(name))
210 return _command(name, list(options), synopsis)
213 return _command(name, list(options), synopsis)
211 else:
214 else:
212 # for "historical portability":
215 # for "historical portability":
213 # define "@command" annotation locally, because cmdutil.command
216 # define "@command" annotation locally, because cmdutil.command
214 # has been available since 1.9 (or 2daa5179e73f)
217 # has been available since 1.9 (or 2daa5179e73f)
215 def command(name, options=(), synopsis=None, norepo=False):
218 def command(name, options=(), synopsis=None, norepo=False):
216 def decorator(func):
219 def decorator(func):
217 if synopsis:
220 if synopsis:
218 cmdtable[name] = func, list(options), synopsis
221 cmdtable[name] = func, list(options), synopsis
219 else:
222 else:
220 cmdtable[name] = func, list(options)
223 cmdtable[name] = func, list(options)
221 if norepo:
224 if norepo:
222 commands.norepo += b' %s' % b' '.join(parsealiases(name))
225 commands.norepo += b' %s' % b' '.join(parsealiases(name))
223 return func
226 return func
224 return decorator
227 return decorator
225
228
226 try:
229 try:
227 import mercurial.registrar
230 import mercurial.registrar
228 import mercurial.configitems
231 import mercurial.configitems
229 configtable = {}
232 configtable = {}
230 configitem = mercurial.registrar.configitem(configtable)
233 configitem = mercurial.registrar.configitem(configtable)
231 configitem(b'perf', b'presleep',
234 configitem(b'perf', b'presleep',
232 default=mercurial.configitems.dynamicdefault,
235 default=mercurial.configitems.dynamicdefault,
233 )
236 )
234 configitem(b'perf', b'stub',
237 configitem(b'perf', b'stub',
235 default=mercurial.configitems.dynamicdefault,
238 default=mercurial.configitems.dynamicdefault,
236 )
239 )
237 configitem(b'perf', b'parentscount',
240 configitem(b'perf', b'parentscount',
238 default=mercurial.configitems.dynamicdefault,
241 default=mercurial.configitems.dynamicdefault,
239 )
242 )
240 configitem(b'perf', b'all-timing',
243 configitem(b'perf', b'all-timing',
241 default=mercurial.configitems.dynamicdefault,
244 default=mercurial.configitems.dynamicdefault,
242 )
245 )
246 configitem(b'perf', b'pre-run',
247 default=mercurial.configitems.dynamicdefault,
248 )
243 configitem(b'perf', b'run-limits',
249 configitem(b'perf', b'run-limits',
244 default=mercurial.configitems.dynamicdefault,
250 default=mercurial.configitems.dynamicdefault,
245 )
251 )
246 except (ImportError, AttributeError):
252 except (ImportError, AttributeError):
247 pass
253 pass
248
254
249 def getlen(ui):
255 def getlen(ui):
250 if ui.configbool(b"perf", b"stub", False):
256 if ui.configbool(b"perf", b"stub", False):
251 return lambda x: 1
257 return lambda x: 1
252 return len
258 return len
253
259
254 def gettimer(ui, opts=None):
260 def gettimer(ui, opts=None):
255 """return a timer function and formatter: (timer, formatter)
261 """return a timer function and formatter: (timer, formatter)
256
262
257 This function exists to gather the creation of formatter in a single
263 This function exists to gather the creation of formatter in a single
258 place instead of duplicating it in all performance commands."""
264 place instead of duplicating it in all performance commands."""
259
265
260 # enforce an idle period before execution to counteract power management
266 # enforce an idle period before execution to counteract power management
261 # experimental config: perf.presleep
267 # experimental config: perf.presleep
262 time.sleep(getint(ui, b"perf", b"presleep", 1))
268 time.sleep(getint(ui, b"perf", b"presleep", 1))
263
269
264 if opts is None:
270 if opts is None:
265 opts = {}
271 opts = {}
266 # redirect all to stderr unless buffer api is in use
272 # redirect all to stderr unless buffer api is in use
267 if not ui._buffers:
273 if not ui._buffers:
268 ui = ui.copy()
274 ui = ui.copy()
269 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
275 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
270 if uifout:
276 if uifout:
271 # for "historical portability":
277 # for "historical portability":
272 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
278 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
273 uifout.set(ui.ferr)
279 uifout.set(ui.ferr)
274
280
275 # get a formatter
281 # get a formatter
276 uiformatter = getattr(ui, 'formatter', None)
282 uiformatter = getattr(ui, 'formatter', None)
277 if uiformatter:
283 if uiformatter:
278 fm = uiformatter(b'perf', opts)
284 fm = uiformatter(b'perf', opts)
279 else:
285 else:
280 # for "historical portability":
286 # for "historical portability":
281 # define formatter locally, because ui.formatter has been
287 # define formatter locally, because ui.formatter has been
282 # available since 2.2 (or ae5f92e154d3)
288 # available since 2.2 (or ae5f92e154d3)
283 from mercurial import node
289 from mercurial import node
284 class defaultformatter(object):
290 class defaultformatter(object):
285 """Minimized composition of baseformatter and plainformatter
291 """Minimized composition of baseformatter and plainformatter
286 """
292 """
287 def __init__(self, ui, topic, opts):
293 def __init__(self, ui, topic, opts):
288 self._ui = ui
294 self._ui = ui
289 if ui.debugflag:
295 if ui.debugflag:
290 self.hexfunc = node.hex
296 self.hexfunc = node.hex
291 else:
297 else:
292 self.hexfunc = node.short
298 self.hexfunc = node.short
293 def __nonzero__(self):
299 def __nonzero__(self):
294 return False
300 return False
295 __bool__ = __nonzero__
301 __bool__ = __nonzero__
296 def startitem(self):
302 def startitem(self):
297 pass
303 pass
298 def data(self, **data):
304 def data(self, **data):
299 pass
305 pass
300 def write(self, fields, deftext, *fielddata, **opts):
306 def write(self, fields, deftext, *fielddata, **opts):
301 self._ui.write(deftext % fielddata, **opts)
307 self._ui.write(deftext % fielddata, **opts)
302 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
308 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
303 if cond:
309 if cond:
304 self._ui.write(deftext % fielddata, **opts)
310 self._ui.write(deftext % fielddata, **opts)
305 def plain(self, text, **opts):
311 def plain(self, text, **opts):
306 self._ui.write(text, **opts)
312 self._ui.write(text, **opts)
307 def end(self):
313 def end(self):
308 pass
314 pass
309 fm = defaultformatter(ui, b'perf', opts)
315 fm = defaultformatter(ui, b'perf', opts)
310
316
311 # stub function, runs code only once instead of in a loop
317 # stub function, runs code only once instead of in a loop
312 # experimental config: perf.stub
318 # experimental config: perf.stub
313 if ui.configbool(b"perf", b"stub", False):
319 if ui.configbool(b"perf", b"stub", False):
314 return functools.partial(stub_timer, fm), fm
320 return functools.partial(stub_timer, fm), fm
315
321
316 # experimental config: perf.all-timing
322 # experimental config: perf.all-timing
317 displayall = ui.configbool(b"perf", b"all-timing", False)
323 displayall = ui.configbool(b"perf", b"all-timing", False)
318
324
319 # experimental config: perf.run-limits
325 # experimental config: perf.run-limits
320 limitspec = ui.configlist(b"perf", b"run-limits", [])
326 limitspec = ui.configlist(b"perf", b"run-limits", [])
321 limits = []
327 limits = []
322 for item in limitspec:
328 for item in limitspec:
323 parts = item.split(b'-', 1)
329 parts = item.split(b'-', 1)
324 if len(parts) < 2:
330 if len(parts) < 2:
325 ui.warn((b'malformatted run limit entry, missing "-": %s\n'
331 ui.warn((b'malformatted run limit entry, missing "-": %s\n'
326 % item))
332 % item))
327 continue
333 continue
328 try:
334 try:
329 time_limit = float(pycompat.sysstr(parts[0]))
335 time_limit = float(pycompat.sysstr(parts[0]))
330 except ValueError as e:
336 except ValueError as e:
331 ui.warn((b'malformatted run limit entry, %s: %s\n'
337 ui.warn((b'malformatted run limit entry, %s: %s\n'
332 % (pycompat.bytestr(e), item)))
338 % (pycompat.bytestr(e), item)))
333 continue
339 continue
334 try:
340 try:
335 run_limit = int(pycompat.sysstr(parts[1]))
341 run_limit = int(pycompat.sysstr(parts[1]))
336 except ValueError as e:
342 except ValueError as e:
337 ui.warn((b'malformatted run limit entry, %s: %s\n'
343 ui.warn((b'malformatted run limit entry, %s: %s\n'
338 % (pycompat.bytestr(e), item)))
344 % (pycompat.bytestr(e), item)))
339 continue
345 continue
340 limits.append((time_limit, run_limit))
346 limits.append((time_limit, run_limit))
341 if not limits:
347 if not limits:
342 limits = DEFAULTLIMITS
348 limits = DEFAULTLIMITS
343
349
344 t = functools.partial(_timer, fm, displayall=displayall, limits=limits)
350 prerun = getint(ui, b"perf", b"pre-run", 0)
351 t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
352 prerun=prerun)
345 return t, fm
353 return t, fm
346
354
347 def stub_timer(fm, func, setup=None, title=None):
355 def stub_timer(fm, func, setup=None, title=None):
348 if setup is not None:
356 if setup is not None:
349 setup()
357 setup()
350 func()
358 func()
351
359
352 @contextlib.contextmanager
360 @contextlib.contextmanager
353 def timeone():
361 def timeone():
354 r = []
362 r = []
355 ostart = os.times()
363 ostart = os.times()
356 cstart = util.timer()
364 cstart = util.timer()
357 yield r
365 yield r
358 cstop = util.timer()
366 cstop = util.timer()
359 ostop = os.times()
367 ostop = os.times()
360 a, b = ostart, ostop
368 a, b = ostart, ostop
361 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
369 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
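A toy use of the context manager above, assuming it is executed inside this module where `timeone` is in scope: the yielded list receives exactly one (wall, user-cpu, system-cpu) tuple once the block exits.

with timeone() as r:
    sum(range(10 ** 6))            # any workload to be measured
wall, user_cpu, system_cpu = r[0]  # single timing tuple appended on exit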
362
370
363
371
364 # list of stop condition (elapsed time, minimal run count)
372 # list of stop condition (elapsed time, minimal run count)
365 DEFAULTLIMITS = (
373 DEFAULTLIMITS = (
366 (3.0, 100),
374 (3.0, 100),
367 (10.0, 3),
375 (10.0, 3),
368 )
376 )
369
377
370 def _timer(fm, func, setup=None, title=None, displayall=False,
378 def _timer(fm, func, setup=None, title=None, displayall=False,
371 limits=DEFAULTLIMITS):
379 limits=DEFAULTLIMITS, prerun=0):
372 gc.collect()
380 gc.collect()
373 results = []
381 results = []
374 begin = util.timer()
382 begin = util.timer()
375 count = 0
383 count = 0
384 for i in _xrange(prerun):
385 if setup is not None:
386 setup()
387 func()
376 keepgoing = True
388 keepgoing = True
377 while keepgoing:
389 while keepgoing:
378 if setup is not None:
390 if setup is not None:
379 setup()
391 setup()
380 with timeone() as item:
392 with timeone() as item:
381 r = func()
393 r = func()
382 count += 1
394 count += 1
383 results.append(item[0])
395 results.append(item[0])
384 cstop = util.timer()
396 cstop = util.timer()
385 # Look for a stop condition.
397 # Look for a stop condition.
386 elapsed = cstop - begin
398 elapsed = cstop - begin
387 for t, mincount in limits:
399 for t, mincount in limits:
388 if elapsed >= t and count >= mincount:
400 if elapsed >= t and count >= mincount:
389 keepgoing = False
401 keepgoing = False
390 break
402 break
391
403
392 formatone(fm, results, title=title, result=r,
404 formatone(fm, results, title=title, result=r,
393 displayall=displayall)
405 displayall=displayall)
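The `prerun` loop added at the top of `_timer()` executes the setup and the benchmarked function a configurable number of times before any timing starts, so caches are warm when measurement begins; in practice the count comes from `--config perf.pre-run=N` or a `[perf]` section in an hgrc. The self-contained sketch below shows the same warm-up pattern outside perf.py; the function name `measure` and its `runs` parameter are illustrative assumptions, not part of this extension.

import time

def measure(func, setup=None, prerun=0, runs=3):
    # Discarded warm-up runs: same work, no timing recorded.
    for _ in range(prerun):
        if setup is not None:
            setup()
        func()
    # Measured runs.
    results = []
    for _ in range(runs):
        if setup is not None:
            setup()
        start = time.perf_counter()
        func()
        results.append(time.perf_counter() - start)
    return min(results)

# Example: warm up twice, then report the best of five timed runs.
best = measure(lambda: sum(range(100000)), prerun=2, runs=5)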
394
406
395 def formatone(fm, timings, title=None, result=None, displayall=False):
407 def formatone(fm, timings, title=None, result=None, displayall=False):
396
408
397 count = len(timings)
409 count = len(timings)
398
410
399 fm.startitem()
411 fm.startitem()
400
412
401 if title:
413 if title:
402 fm.write(b'title', b'! %s\n', title)
414 fm.write(b'title', b'! %s\n', title)
403 if result:
415 if result:
404 fm.write(b'result', b'! result: %s\n', result)
416 fm.write(b'result', b'! result: %s\n', result)
405 def display(role, entry):
417 def display(role, entry):
406 prefix = b''
418 prefix = b''
407 if role != b'best':
419 if role != b'best':
408 prefix = b'%s.' % role
420 prefix = b'%s.' % role
409 fm.plain(b'!')
421 fm.plain(b'!')
410 fm.write(prefix + b'wall', b' wall %f', entry[0])
422 fm.write(prefix + b'wall', b' wall %f', entry[0])
411 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
423 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
412 fm.write(prefix + b'user', b' user %f', entry[1])
424 fm.write(prefix + b'user', b' user %f', entry[1])
413 fm.write(prefix + b'sys', b' sys %f', entry[2])
425 fm.write(prefix + b'sys', b' sys %f', entry[2])
414 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
426 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
415 fm.plain(b'\n')
427 fm.plain(b'\n')
416 timings.sort()
428 timings.sort()
417 min_val = timings[0]
429 min_val = timings[0]
418 display(b'best', min_val)
430 display(b'best', min_val)
419 if displayall:
431 if displayall:
420 max_val = timings[-1]
432 max_val = timings[-1]
421 display(b'max', max_val)
433 display(b'max', max_val)
422 avg = tuple([sum(x) / count for x in zip(*timings)])
434 avg = tuple([sum(x) / count for x in zip(*timings)])
423 display(b'avg', avg)
435 display(b'avg', avg)
424 median = timings[len(timings) // 2]
436 median = timings[len(timings) // 2]
425 display(b'median', median)
437 display(b'median', median)
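A worked example of the aggregation above, using made-up timings: each entry is a (wall, user, sys) tuple, sorting orders them by wall time, and `avg` averages the tuples column-wise.

timings = [(0.012, 0.009, 0.002), (0.020, 0.015, 0.003), (0.010, 0.008, 0.001)]
timings.sort()
count = len(timings)
best = timings[0]                                    # (0.010, 0.008, 0.001)
worst = timings[-1]                                  # (0.020, 0.015, 0.003)
avg = tuple(sum(x) / count for x in zip(*timings))   # ~(0.0140, 0.0107, 0.0020)
median = timings[count // 2]                         # (0.012, 0.009, 0.002)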
426
438
427 # utilities for historical portability
439 # utilities for historical portability
428
440
429 def getint(ui, section, name, default):
441 def getint(ui, section, name, default):
430 # for "historical portability":
442 # for "historical portability":
431 # ui.configint has been available since 1.9 (or fa2b596db182)
443 # ui.configint has been available since 1.9 (or fa2b596db182)
432 v = ui.config(section, name, None)
444 v = ui.config(section, name, None)
433 if v is None:
445 if v is None:
434 return default
446 return default
435 try:
447 try:
436 return int(v)
448 return int(v)
437 except ValueError:
449 except ValueError:
438 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
450 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
439 % (section, name, v))
451 % (section, name, v))
440
452
441 def safeattrsetter(obj, name, ignoremissing=False):
453 def safeattrsetter(obj, name, ignoremissing=False):
442 """Ensure that 'obj' has 'name' attribute before subsequent setattr
454 """Ensure that 'obj' has 'name' attribute before subsequent setattr
443
455
444 This function is aborted, if 'obj' doesn't have 'name' attribute
456 This function is aborted, if 'obj' doesn't have 'name' attribute
445 at runtime. This avoids overlooking removal of an attribute, which
457 at runtime. This avoids overlooking removal of an attribute, which
446 breaks assumption of performance measurement, in the future.
458 breaks assumption of performance measurement, in the future.
447
459
448 This function returns the object to (1) assign a new value, and
460 This function returns the object to (1) assign a new value, and
449 (2) restore an original value to the attribute.
461 (2) restore an original value to the attribute.
450
462
451 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
463 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
452 abortion, and this function returns None. This is useful to
464 abortion, and this function returns None. This is useful to
453 examine an attribute, which isn't ensured in all Mercurial
465 examine an attribute, which isn't ensured in all Mercurial
454 versions.
466 versions.
455 """
467 """
456 if not util.safehasattr(obj, name):
468 if not util.safehasattr(obj, name):
457 if ignoremissing:
469 if ignoremissing:
458 return None
470 return None
459 raise error.Abort((b"missing attribute %s of %s might break assumption"
471 raise error.Abort((b"missing attribute %s of %s might break assumption"
460 b" of performance measurement") % (name, obj))
472 b" of performance measurement") % (name, obj))
461
473
462 origvalue = getattr(obj, _sysstr(name))
474 origvalue = getattr(obj, _sysstr(name))
463 class attrutil(object):
475 class attrutil(object):
464 def set(self, newvalue):
476 def set(self, newvalue):
465 setattr(obj, _sysstr(name), newvalue)
477 setattr(obj, _sysstr(name), newvalue)
466 def restore(self):
478 def restore(self):
467 setattr(obj, _sysstr(name), origvalue)
479 setattr(obj, _sysstr(name), origvalue)
468
480
469 return attrutil()
481 return attrutil()
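A toy illustration of the setter object returned above, assuming this module's helpers are in scope; the `Box` class is invented for the example.

class Box(object):
    value = 1

box = Box()
setter = safeattrsetter(box, b'value')
setter.set(2)        # box.value is now 2
setter.restore()     # box.value is back to the original 1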
470
482
471 # utilities to examine each internal API changes
483 # utilities to examine each internal API changes
472
484
473 def getbranchmapsubsettable():
485 def getbranchmapsubsettable():
474 # for "historical portability":
486 # for "historical portability":
475 # subsettable is defined in:
487 # subsettable is defined in:
476 # - branchmap since 2.9 (or 175c6fd8cacc)
488 # - branchmap since 2.9 (or 175c6fd8cacc)
477 # - repoview since 2.5 (or 59a9f18d4587)
489 # - repoview since 2.5 (or 59a9f18d4587)
478 # - repoviewutil since 5.0
490 # - repoviewutil since 5.0
479 for mod in (branchmap, repoview, repoviewutil):
491 for mod in (branchmap, repoview, repoviewutil):
480 subsettable = getattr(mod, 'subsettable', None)
492 subsettable = getattr(mod, 'subsettable', None)
481 if subsettable:
493 if subsettable:
482 return subsettable
494 return subsettable
483
495
484 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
496 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
485 # branchmap and repoview modules exist, but subsettable attribute
497 # branchmap and repoview modules exist, but subsettable attribute
486 # doesn't)
498 # doesn't)
487 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
499 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
488 hint=b"use 2.5 or later")
500 hint=b"use 2.5 or later")
489
501
490 def getsvfs(repo):
502 def getsvfs(repo):
491 """Return appropriate object to access files under .hg/store
503 """Return appropriate object to access files under .hg/store
492 """
504 """
493 # for "historical portability":
505 # for "historical portability":
494 # repo.svfs has been available since 2.3 (or 7034365089bf)
506 # repo.svfs has been available since 2.3 (or 7034365089bf)
495 svfs = getattr(repo, 'svfs', None)
507 svfs = getattr(repo, 'svfs', None)
496 if svfs:
508 if svfs:
497 return svfs
509 return svfs
498 else:
510 else:
499 return getattr(repo, 'sopener')
511 return getattr(repo, 'sopener')
500
512
501 def getvfs(repo):
513 def getvfs(repo):
502 """Return appropriate object to access files under .hg
514 """Return appropriate object to access files under .hg
503 """
515 """
504 # for "historical portability":
516 # for "historical portability":
505 # repo.vfs has been available since 2.3 (or 7034365089bf)
517 # repo.vfs has been available since 2.3 (or 7034365089bf)
506 vfs = getattr(repo, 'vfs', None)
518 vfs = getattr(repo, 'vfs', None)
507 if vfs:
519 if vfs:
508 return vfs
520 return vfs
509 else:
521 else:
510 return getattr(repo, 'opener')
522 return getattr(repo, 'opener')
511
523
512 def repocleartagscachefunc(repo):
524 def repocleartagscachefunc(repo):
513 """Return the function to clear tags cache according to repo internal API
525 """Return the function to clear tags cache according to repo internal API
514 """
526 """
515 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
527 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
516 # in this case, setattr(repo, '_tagscache', None) or so isn't
528 # in this case, setattr(repo, '_tagscache', None) or so isn't
517 # correct way to clear tags cache, because existing code paths
529 # correct way to clear tags cache, because existing code paths
518 # expect _tagscache to be a structured object.
530 # expect _tagscache to be a structured object.
519 def clearcache():
531 def clearcache():
520 # _tagscache has been filteredpropertycache since 2.5 (or
532 # _tagscache has been filteredpropertycache since 2.5 (or
521 # 98c867ac1330), and delattr() can't work in such case
533 # 98c867ac1330), and delattr() can't work in such case
522 if b'_tagscache' in vars(repo):
534 if b'_tagscache' in vars(repo):
523 del repo.__dict__[b'_tagscache']
535 del repo.__dict__[b'_tagscache']
524 return clearcache
536 return clearcache
525
537
526 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
538 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
527 if repotags: # since 1.4 (or 5614a628d173)
539 if repotags: # since 1.4 (or 5614a628d173)
528 return lambda : repotags.set(None)
540 return lambda : repotags.set(None)
529
541
530 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
542 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
531 if repotagscache: # since 0.6 (or d7df759d0e97)
543 if repotagscache: # since 0.6 (or d7df759d0e97)
532 return lambda : repotagscache.set(None)
544 return lambda : repotagscache.set(None)
533
545
534 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
546 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
535 # this point, but it isn't so problematic, because:
547 # this point, but it isn't so problematic, because:
536 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
548 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
537 # in perftags() causes failure soon
549 # in perftags() causes failure soon
538 # - perf.py itself has been available since 1.1 (or eb240755386d)
550 # - perf.py itself has been available since 1.1 (or eb240755386d)
539 raise error.Abort((b"tags API of this hg command is unknown"))
551 raise error.Abort((b"tags API of this hg command is unknown"))
540
552
541 # utilities to clear cache
553 # utilities to clear cache
542
554
543 def clearfilecache(obj, attrname):
555 def clearfilecache(obj, attrname):
544 unfiltered = getattr(obj, 'unfiltered', None)
556 unfiltered = getattr(obj, 'unfiltered', None)
545 if unfiltered is not None:
557 if unfiltered is not None:
546 obj = obj.unfiltered()
558 obj = obj.unfiltered()
547 if attrname in vars(obj):
559 if attrname in vars(obj):
548 delattr(obj, attrname)
560 delattr(obj, attrname)
549 obj._filecache.pop(attrname, None)
561 obj._filecache.pop(attrname, None)
550
562
551 def clearchangelog(repo):
563 def clearchangelog(repo):
552 if repo is not repo.unfiltered():
564 if repo is not repo.unfiltered():
553 object.__setattr__(repo, r'_clcachekey', None)
565 object.__setattr__(repo, r'_clcachekey', None)
554 object.__setattr__(repo, r'_clcache', None)
566 object.__setattr__(repo, r'_clcache', None)
555 clearfilecache(repo.unfiltered(), 'changelog')
567 clearfilecache(repo.unfiltered(), 'changelog')
556
568
557 # perf commands
569 # perf commands
558
570
559 @command(b'perfwalk', formatteropts)
571 @command(b'perfwalk', formatteropts)
560 def perfwalk(ui, repo, *pats, **opts):
572 def perfwalk(ui, repo, *pats, **opts):
561 opts = _byteskwargs(opts)
573 opts = _byteskwargs(opts)
562 timer, fm = gettimer(ui, opts)
574 timer, fm = gettimer(ui, opts)
563 m = scmutil.match(repo[None], pats, {})
575 m = scmutil.match(repo[None], pats, {})
564 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
576 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
565 ignored=False))))
577 ignored=False))))
566 fm.end()
578 fm.end()
567
579
568 @command(b'perfannotate', formatteropts)
580 @command(b'perfannotate', formatteropts)
569 def perfannotate(ui, repo, f, **opts):
581 def perfannotate(ui, repo, f, **opts):
570 opts = _byteskwargs(opts)
582 opts = _byteskwargs(opts)
571 timer, fm = gettimer(ui, opts)
583 timer, fm = gettimer(ui, opts)
572 fc = repo[b'.'][f]
584 fc = repo[b'.'][f]
573 timer(lambda: len(fc.annotate(True)))
585 timer(lambda: len(fc.annotate(True)))
574 fm.end()
586 fm.end()
575
587
576 @command(b'perfstatus',
588 @command(b'perfstatus',
577 [(b'u', b'unknown', False,
589 [(b'u', b'unknown', False,
578 b'ask status to look for unknown files')] + formatteropts)
590 b'ask status to look for unknown files')] + formatteropts)
579 def perfstatus(ui, repo, **opts):
591 def perfstatus(ui, repo, **opts):
580 opts = _byteskwargs(opts)
592 opts = _byteskwargs(opts)
581 #m = match.always(repo.root, repo.getcwd())
593 #m = match.always(repo.root, repo.getcwd())
582 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
594 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
583 # False))))
595 # False))))
584 timer, fm = gettimer(ui, opts)
596 timer, fm = gettimer(ui, opts)
585 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
597 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
586 fm.end()
598 fm.end()
587
599
588 @command(b'perfaddremove', formatteropts)
600 @command(b'perfaddremove', formatteropts)
589 def perfaddremove(ui, repo, **opts):
601 def perfaddremove(ui, repo, **opts):
590 opts = _byteskwargs(opts)
602 opts = _byteskwargs(opts)
591 timer, fm = gettimer(ui, opts)
603 timer, fm = gettimer(ui, opts)
592 try:
604 try:
593 oldquiet = repo.ui.quiet
605 oldquiet = repo.ui.quiet
594 repo.ui.quiet = True
606 repo.ui.quiet = True
595 matcher = scmutil.match(repo[None])
607 matcher = scmutil.match(repo[None])
596 opts[b'dry_run'] = True
608 opts[b'dry_run'] = True
597 if b'uipathfn' in getargspec(scmutil.addremove).args:
609 if b'uipathfn' in getargspec(scmutil.addremove).args:
598 uipathfn = scmutil.getuipathfn(repo)
610 uipathfn = scmutil.getuipathfn(repo)
599 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
611 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
600 else:
612 else:
601 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
613 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
602 finally:
614 finally:
603 repo.ui.quiet = oldquiet
615 repo.ui.quiet = oldquiet
604 fm.end()
616 fm.end()
605
617
606 def clearcaches(cl):
618 def clearcaches(cl):
607 # behave somewhat consistently across internal API changes
619 # behave somewhat consistently across internal API changes
608 if util.safehasattr(cl, b'clearcaches'):
620 if util.safehasattr(cl, b'clearcaches'):
609 cl.clearcaches()
621 cl.clearcaches()
610 elif util.safehasattr(cl, b'_nodecache'):
622 elif util.safehasattr(cl, b'_nodecache'):
611 from mercurial.node import nullid, nullrev
623 from mercurial.node import nullid, nullrev
612 cl._nodecache = {nullid: nullrev}
624 cl._nodecache = {nullid: nullrev}
613 cl._nodepos = None
625 cl._nodepos = None
614
626
615 @command(b'perfheads', formatteropts)
627 @command(b'perfheads', formatteropts)
616 def perfheads(ui, repo, **opts):
628 def perfheads(ui, repo, **opts):
617 """benchmark the computation of a changelog heads"""
629 """benchmark the computation of a changelog heads"""
618 opts = _byteskwargs(opts)
630 opts = _byteskwargs(opts)
619 timer, fm = gettimer(ui, opts)
631 timer, fm = gettimer(ui, opts)
620 cl = repo.changelog
632 cl = repo.changelog
621 def s():
633 def s():
622 clearcaches(cl)
634 clearcaches(cl)
623 def d():
635 def d():
624 len(cl.headrevs())
636 len(cl.headrevs())
625 timer(d, setup=s)
637 timer(d, setup=s)
626 fm.end()
638 fm.end()
627
639
628 @command(b'perftags', formatteropts+
640 @command(b'perftags', formatteropts+
629 [
641 [
630 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
642 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
631 ])
643 ])
632 def perftags(ui, repo, **opts):
644 def perftags(ui, repo, **opts):
633 opts = _byteskwargs(opts)
645 opts = _byteskwargs(opts)
634 timer, fm = gettimer(ui, opts)
646 timer, fm = gettimer(ui, opts)
635 repocleartagscache = repocleartagscachefunc(repo)
647 repocleartagscache = repocleartagscachefunc(repo)
636 clearrevlogs = opts[b'clear_revlogs']
648 clearrevlogs = opts[b'clear_revlogs']
637 def s():
649 def s():
638 if clearrevlogs:
650 if clearrevlogs:
639 clearchangelog(repo)
651 clearchangelog(repo)
640 clearfilecache(repo.unfiltered(), 'manifest')
652 clearfilecache(repo.unfiltered(), 'manifest')
641 repocleartagscache()
653 repocleartagscache()
642 def t():
654 def t():
643 return len(repo.tags())
655 return len(repo.tags())
644 timer(t, setup=s)
656 timer(t, setup=s)
645 fm.end()
657 fm.end()
646
658
647 @command(b'perfancestors', formatteropts)
659 @command(b'perfancestors', formatteropts)
648 def perfancestors(ui, repo, **opts):
660 def perfancestors(ui, repo, **opts):
649 opts = _byteskwargs(opts)
661 opts = _byteskwargs(opts)
650 timer, fm = gettimer(ui, opts)
662 timer, fm = gettimer(ui, opts)
651 heads = repo.changelog.headrevs()
663 heads = repo.changelog.headrevs()
652 def d():
664 def d():
653 for a in repo.changelog.ancestors(heads):
665 for a in repo.changelog.ancestors(heads):
654 pass
666 pass
655 timer(d)
667 timer(d)
656 fm.end()
668 fm.end()
657
669
658 @command(b'perfancestorset', formatteropts)
670 @command(b'perfancestorset', formatteropts)
659 def perfancestorset(ui, repo, revset, **opts):
671 def perfancestorset(ui, repo, revset, **opts):
660 opts = _byteskwargs(opts)
672 opts = _byteskwargs(opts)
661 timer, fm = gettimer(ui, opts)
673 timer, fm = gettimer(ui, opts)
662 revs = repo.revs(revset)
674 revs = repo.revs(revset)
663 heads = repo.changelog.headrevs()
675 heads = repo.changelog.headrevs()
664 def d():
676 def d():
665 s = repo.changelog.ancestors(heads)
677 s = repo.changelog.ancestors(heads)
666 for rev in revs:
678 for rev in revs:
667 rev in s
679 rev in s
668 timer(d)
680 timer(d)
669 fm.end()
681 fm.end()
670
682
671 @command(b'perfdiscovery', formatteropts, b'PATH')
683 @command(b'perfdiscovery', formatteropts, b'PATH')
672 def perfdiscovery(ui, repo, path, **opts):
684 def perfdiscovery(ui, repo, path, **opts):
673 """benchmark discovery between local repo and the peer at given path
685 """benchmark discovery between local repo and the peer at given path
674 """
686 """
675 repos = [repo, None]
687 repos = [repo, None]
676 timer, fm = gettimer(ui, opts)
688 timer, fm = gettimer(ui, opts)
677 path = ui.expandpath(path)
689 path = ui.expandpath(path)
678
690
679 def s():
691 def s():
680 repos[1] = hg.peer(ui, opts, path)
692 repos[1] = hg.peer(ui, opts, path)
681 def d():
693 def d():
682 setdiscovery.findcommonheads(ui, *repos)
694 setdiscovery.findcommonheads(ui, *repos)
683 timer(d, setup=s)
695 timer(d, setup=s)
684 fm.end()
696 fm.end()
685
697
686 @command(b'perfbookmarks', formatteropts +
698 @command(b'perfbookmarks', formatteropts +
687 [
699 [
688 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
700 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
689 ])
701 ])
690 def perfbookmarks(ui, repo, **opts):
702 def perfbookmarks(ui, repo, **opts):
691 """benchmark parsing bookmarks from disk to memory"""
703 """benchmark parsing bookmarks from disk to memory"""
692 opts = _byteskwargs(opts)
704 opts = _byteskwargs(opts)
693 timer, fm = gettimer(ui, opts)
705 timer, fm = gettimer(ui, opts)
694
706
695 clearrevlogs = opts[b'clear_revlogs']
707 clearrevlogs = opts[b'clear_revlogs']
696 def s():
708 def s():
697 if clearrevlogs:
709 if clearrevlogs:
698 clearchangelog(repo)
710 clearchangelog(repo)
699 clearfilecache(repo, b'_bookmarks')
711 clearfilecache(repo, b'_bookmarks')
700 def d():
712 def d():
701 repo._bookmarks
713 repo._bookmarks
702 timer(d, setup=s)
714 timer(d, setup=s)
703 fm.end()
715 fm.end()
704
716
705 @command(b'perfbundleread', formatteropts, b'BUNDLE')
717 @command(b'perfbundleread', formatteropts, b'BUNDLE')
706 def perfbundleread(ui, repo, bundlepath, **opts):
718 def perfbundleread(ui, repo, bundlepath, **opts):
707 """Benchmark reading of bundle files.
719 """Benchmark reading of bundle files.
708
720
709 This command is meant to isolate the I/O part of bundle reading as
721 This command is meant to isolate the I/O part of bundle reading as
710 much as possible.
722 much as possible.
711 """
723 """
712 from mercurial import (
724 from mercurial import (
713 bundle2,
725 bundle2,
714 exchange,
726 exchange,
715 streamclone,
727 streamclone,
716 )
728 )
717
729
718 opts = _byteskwargs(opts)
730 opts = _byteskwargs(opts)
719
731
720 def makebench(fn):
732 def makebench(fn):
721 def run():
733 def run():
722 with open(bundlepath, b'rb') as fh:
734 with open(bundlepath, b'rb') as fh:
723 bundle = exchange.readbundle(ui, fh, bundlepath)
735 bundle = exchange.readbundle(ui, fh, bundlepath)
724 fn(bundle)
736 fn(bundle)
725
737
726 return run
738 return run
727
739
728 def makereadnbytes(size):
740 def makereadnbytes(size):
729 def run():
741 def run():
730 with open(bundlepath, b'rb') as fh:
742 with open(bundlepath, b'rb') as fh:
731 bundle = exchange.readbundle(ui, fh, bundlepath)
743 bundle = exchange.readbundle(ui, fh, bundlepath)
732 while bundle.read(size):
744 while bundle.read(size):
733 pass
745 pass
734
746
735 return run
747 return run
736
748
737 def makestdioread(size):
749 def makestdioread(size):
738 def run():
750 def run():
739 with open(bundlepath, b'rb') as fh:
751 with open(bundlepath, b'rb') as fh:
740 while fh.read(size):
752 while fh.read(size):
741 pass
753 pass
742
754
743 return run
755 return run
744
756
745 # bundle1
757 # bundle1
746
758
747 def deltaiter(bundle):
759 def deltaiter(bundle):
748 for delta in bundle.deltaiter():
760 for delta in bundle.deltaiter():
749 pass
761 pass
750
762
751 def iterchunks(bundle):
763 def iterchunks(bundle):
752 for chunk in bundle.getchunks():
764 for chunk in bundle.getchunks():
753 pass
765 pass
754
766
755 # bundle2
767 # bundle2
756
768
757 def forwardchunks(bundle):
769 def forwardchunks(bundle):
758 for chunk in bundle._forwardchunks():
770 for chunk in bundle._forwardchunks():
759 pass
771 pass
760
772
761 def iterparts(bundle):
773 def iterparts(bundle):
762 for part in bundle.iterparts():
774 for part in bundle.iterparts():
763 pass
775 pass
764
776
765 def iterpartsseekable(bundle):
777 def iterpartsseekable(bundle):
766 for part in bundle.iterparts(seekable=True):
778 for part in bundle.iterparts(seekable=True):
767 pass
779 pass
768
780
769 def seek(bundle):
781 def seek(bundle):
770 for part in bundle.iterparts(seekable=True):
782 for part in bundle.iterparts(seekable=True):
771 part.seek(0, os.SEEK_END)
783 part.seek(0, os.SEEK_END)
772
784
773 def makepartreadnbytes(size):
785 def makepartreadnbytes(size):
774 def run():
786 def run():
775 with open(bundlepath, b'rb') as fh:
787 with open(bundlepath, b'rb') as fh:
776 bundle = exchange.readbundle(ui, fh, bundlepath)
788 bundle = exchange.readbundle(ui, fh, bundlepath)
777 for part in bundle.iterparts():
789 for part in bundle.iterparts():
778 while part.read(size):
790 while part.read(size):
779 pass
791 pass
780
792
781 return run
793 return run
782
794
783 benches = [
795 benches = [
784 (makestdioread(8192), b'read(8k)'),
796 (makestdioread(8192), b'read(8k)'),
785 (makestdioread(16384), b'read(16k)'),
797 (makestdioread(16384), b'read(16k)'),
786 (makestdioread(32768), b'read(32k)'),
798 (makestdioread(32768), b'read(32k)'),
787 (makestdioread(131072), b'read(128k)'),
799 (makestdioread(131072), b'read(128k)'),
788 ]
800 ]
789
801
790 with open(bundlepath, b'rb') as fh:
802 with open(bundlepath, b'rb') as fh:
791 bundle = exchange.readbundle(ui, fh, bundlepath)
803 bundle = exchange.readbundle(ui, fh, bundlepath)
792
804
793 if isinstance(bundle, changegroup.cg1unpacker):
805 if isinstance(bundle, changegroup.cg1unpacker):
794 benches.extend([
806 benches.extend([
795 (makebench(deltaiter), b'cg1 deltaiter()'),
807 (makebench(deltaiter), b'cg1 deltaiter()'),
796 (makebench(iterchunks), b'cg1 getchunks()'),
808 (makebench(iterchunks), b'cg1 getchunks()'),
797 (makereadnbytes(8192), b'cg1 read(8k)'),
809 (makereadnbytes(8192), b'cg1 read(8k)'),
798 (makereadnbytes(16384), b'cg1 read(16k)'),
810 (makereadnbytes(16384), b'cg1 read(16k)'),
799 (makereadnbytes(32768), b'cg1 read(32k)'),
811 (makereadnbytes(32768), b'cg1 read(32k)'),
800 (makereadnbytes(131072), b'cg1 read(128k)'),
812 (makereadnbytes(131072), b'cg1 read(128k)'),
801 ])
813 ])
802 elif isinstance(bundle, bundle2.unbundle20):
814 elif isinstance(bundle, bundle2.unbundle20):
803 benches.extend([
815 benches.extend([
804 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
816 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
805 (makebench(iterparts), b'bundle2 iterparts()'),
817 (makebench(iterparts), b'bundle2 iterparts()'),
806 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
818 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
807 (makebench(seek), b'bundle2 part seek()'),
819 (makebench(seek), b'bundle2 part seek()'),
808 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
820 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
809 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
821 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
810 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
822 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
811 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
823 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
812 ])
824 ])
813 elif isinstance(bundle, streamclone.streamcloneapplier):
825 elif isinstance(bundle, streamclone.streamcloneapplier):
814 raise error.Abort(b'stream clone bundles not supported')
826 raise error.Abort(b'stream clone bundles not supported')
815 else:
827 else:
816 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
828 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
817
829
818 for fn, title in benches:
830 for fn, title in benches:
819 timer, fm = gettimer(ui, opts)
831 timer, fm = gettimer(ui, opts)
820 timer(fn, title=title)
832 timer(fn, title=title)
821 fm.end()
833 fm.end()
822
834
823 @command(b'perfchangegroupchangelog', formatteropts +
835 @command(b'perfchangegroupchangelog', formatteropts +
824 [(b'', b'cgversion', b'02', b'changegroup version'),
836 [(b'', b'cgversion', b'02', b'changegroup version'),
825 (b'r', b'rev', b'', b'revisions to add to changegroup')])
837 (b'r', b'rev', b'', b'revisions to add to changegroup')])
826 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
838 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
827 """Benchmark producing a changelog group for a changegroup.
839 """Benchmark producing a changelog group for a changegroup.
828
840
829 This measures the time spent processing the changelog during a
841 This measures the time spent processing the changelog during a
830 bundle operation. This occurs during `hg bundle` and on a server
842 bundle operation. This occurs during `hg bundle` and on a server
831 processing a `getbundle` wire protocol request (handles clones
843 processing a `getbundle` wire protocol request (handles clones
832 and pull requests).
844 and pull requests).
833
845
834 By default, all revisions are added to the changegroup.
846 By default, all revisions are added to the changegroup.
835 """
847 """
836 opts = _byteskwargs(opts)
848 opts = _byteskwargs(opts)
837 cl = repo.changelog
849 cl = repo.changelog
838 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
850 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
839 bundler = changegroup.getbundler(cgversion, repo)
851 bundler = changegroup.getbundler(cgversion, repo)
840
852
841 def d():
853 def d():
842 state, chunks = bundler._generatechangelog(cl, nodes)
854 state, chunks = bundler._generatechangelog(cl, nodes)
843 for chunk in chunks:
855 for chunk in chunks:
844 pass
856 pass
845
857
846 timer, fm = gettimer(ui, opts)
858 timer, fm = gettimer(ui, opts)
847
859
848 # Terminal printing can interfere with timing. So disable it.
860 # Terminal printing can interfere with timing. So disable it.
849 with ui.configoverride({(b'progress', b'disable'): True}):
861 with ui.configoverride({(b'progress', b'disable'): True}):
850 timer(d)
862 timer(d)
851
863
852 fm.end()
864 fm.end()
853
865
854 @command(b'perfdirs', formatteropts)
866 @command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate.hasdir(b'a')
        del dirstate._map._dirs
    timer(d)
    fm.end()

@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate
    def d():
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(d)
    fm.end()

@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate
    def d():
        repo.dirstate.hasdir(b"a")
        del repo.dirstate._map._dirs
    timer(d)
    fm.end()

@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate._map.filefoldmap.get(b'a')
        del dirstate._map.filefoldmap
    timer(d)
    fm.end()

@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate._map.dirfoldmap.get(b'a')
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(d)
    fm.end()

@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds
    def d():
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()

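# Illustrative sketch (not part of perf.py): the dirstate benchmarks above all
# share one shape -- prime a lazily computed attribute once, then have the
# timed function use it and delete it again, so every run pays the full
# recomputation cost instead of hitting a warm cache. The class below is a
# made-up stand-in for dirstate._map; only the pattern is real.
def _example_cache_dropping_benchmark():
    import time

    class _FakeMap(object):
        @property
        def dirs(self):
            if not hasattr(self, '_dirs'):
                self._dirs = {b'a', b'b'}  # stand-in for an expensive build
            return self._dirs

    fakemap = _FakeMap()
    fakemap.dirs  # prime the cache, like ``b'a' in dirstate`` above

    def d():
        fakemap.dirs       # rebuilds the set on every call ...
        del fakemap._dirs  # ... because the cache is dropped right after

    start = time.time()
    d()
    return time.time() - start
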
@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()

@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def d():
        copies.pathcopies(ctx1, ctx2)
    timer(d)
    fm.end()

@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
          ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def d():
        phases = _phases
        if full:
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()

@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()

@command(b'perfmanifest', [
    (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
    (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()

@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()
    def d():
        repo.changelog.read(n)
        #repo.changelog._cache = None
    timer(d)
    fm.end()

@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()

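# Illustrative sketch (not part of perf.py): perfignore passes ``setup=`` so
# that cache invalidation happens before the clock starts and only the
# ``_ignore`` computation is measured. A minimal stdlib version of that split,
# with made-up names and requiring Python 3 for ``time.perf_counter``:
def _example_timed_run_with_setup(setup, func, runs=3):
    import time

    results = []
    for _ in range(runs):
        setup()                       # not measured, mirrors ``setupone``
        begin = time.perf_counter()
        func()                        # measured, mirrors ``runone``
        results.append(time.perf_counter() - begin)
    return min(results)               # report the best run, as perf.py does by default
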
@command(b'perfindex', [
    (b'', b'rev', [], b'revision to be looked up (default tip)'),
    (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matter. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matter.

    Examples of useful sets to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, check out the `perfnodemap` command."""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        if opts['rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoids polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)
    timer(d, setup=setup)
    fm.end()

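# For example (illustrative invocations, assuming this extension is enabled
# under the name ``perf``), the revsets listed in the docstring above can be
# exercised with something like:
#
#   $ hg perfindex --rev tip
#   $ hg perfindex --rev '-10000:' --rev 0
#   $ hg perfindex --no-lookup
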
@command(b'perfnodemap', [
    (b'', b'rev', [], b'revision to be looked up (default tip)'),
    (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ] + formatteropts)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revisions from a cold nodemap

    Depending on the implementation, the amount and order of revisions we look
    up can vary. Examples of useful sets to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focuses on valid binary lookups. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts['clear_caches']
    # find the filecache func directly
    # This avoids polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort('use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]
    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        def setup():
            setnodeget()
    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()

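# Illustrative sketch (not part of perf.py): perfnodemap smuggles the nodemap
# getter from the setup closure into the timed closure through a one-element
# list, because rebinding a plain local would not be visible across the two
# functions (this file still supports Python 2, so ``nonlocal`` is out).
# A minimal, made-up version of the same trick:
def _example_share_state_between_closures():
    shared = [None]                   # mirrors ``nodeget = [None]``

    def setup():
        # stand-in for ``makecl(unfi).nodemap.get``
        shared[0] = {b'node-a': 0, b'node-b': 1}.get

    def run():
        get = shared[0]
        return get(b'node-a')

    setup()
    return run()
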
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        if os.name != r'nt':
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
        else:
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
    timer(d)
    fm.end()

@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nl = [repo.changelog.node(i) for i in _xrange(count)]
    def d():
        for n in nl:
            repo.changelog.parents(n)
    timer(d)
    fm.end()

@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    def d():
        len(repo[x].files())
    timer(d)
    fm.end()

@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        len(cl.read(x)[3])
    timer(d)
    fm.end()

@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()

@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
          ], norepo=True)
def perflinelogedits(ui, **opts):
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()

@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(n)
        clearcaches(cl)
    timer(d)
    fm.end()

@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
          ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
                               copies=opts.get(b'rename')))
    ui.popbuffer()
    fm.end()

@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            ctx.branch()  # read changelog data (in addition to the index)
    timer(moonwalk)
    fm.end()

@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
          ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()

@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
          ])
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for the `perftracecopies`
    command

    This command finds source-destination pairs relevant for copy tracing
    testing. It reports values for some of the parameters that impact copy
    tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However, it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()

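# Illustrative sketch (not part of perf.py): the ``--timing`` branch above
# measures a single ``pathcopies`` call, which the inline comment notes is not
# very stable. When steadier numbers are wanted, repeating the call and keeping
# the best sample is the usual cure; a made-up stdlib version of that idea:
def _example_stable_one_shot_timing(func, repeats=5):
    import timeit

    # number=1 keeps each sample a single call; min() discards scheduler noise.
    return min(timeit.repeat(func, number=1, repeat=repeats))
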
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
    fm.end()

@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    def d():
        s.fncache._load()
    timer(d)
    fm.end()

@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')
    def d():
        s.fncache._dirty = True
        s.fncache.write(tr)
    timer(d)
    tr.close()
    lock.release()
    fm.end()

@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    s.fncache._load()
    def d():
        for p in s.fncache.entries:
            s.encode(p)
    timer(d)
    fm.end()

def _bdiffworker(q, blocks, xdiff, ready, done):
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()

def _manifestrevision(repo, mnode):
    ml = repo.manifestlog

    if util.safehasattr(ml, b'getstorage'):
        store = ml.getstorage(b'')
    else:
        store = ml._revlog

    return store.revision(mnode)

@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()

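# Illustrative sketch (not part of perf.py): the threaded branch of perfbdiff
# drives ``_bdiffworker`` through a joinable queue -- producers put work items,
# one ``None`` per worker marks the end of a batch, and ``q.join()`` blocks
# until every item has been acknowledged with ``task_done()``. A simplified,
# single-batch, made-up version of that protocol:
def _example_queue_worker_pattern(items, nthreads=2):
    import threading
    try:
        from Queue import Queue      # Python 2
    except ImportError:
        from queue import Queue      # Python 3

    results = []

    def worker(q):
        item = q.get()
        while item is not None:
            results.append(item * 2)  # stand-in for mdiff.textdiff(*pair)
            q.task_done()
            item = q.get()
        q.task_done()                 # acknowledge the terminating None

    q = Queue()
    for _ in range(nthreads):
        threading.Thread(target=worker, args=(q,)).start()
    for item in items:
        q.put(item)
    for _ in range(nthreads):
        q.put(None)                   # one stop marker per worker
    q.join()                          # wait until everything is processed
    return results
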
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = dict((options[c], b'1') for c in diffopt)
        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()
        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()

@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
        ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

1848 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1860 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1849 [(b'd', b'dist', 100, b'distance between the revisions'),
1861 [(b'd', b'dist', 100, b'distance between the revisions'),
1850 (b's', b'startrev', 0, b'revision to start reading at'),
1862 (b's', b'startrev', 0, b'revision to start reading at'),
1851 (b'', b'reverse', False, b'read in reverse')],
1863 (b'', b'reverse', False, b'read in reverse')],
1852 b'-c|-m|FILE')
1864 b'-c|-m|FILE')
1853 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1865 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1854 **opts):
1866 **opts):
1855 """Benchmark reading a series of revisions from a revlog.
1867 """Benchmark reading a series of revisions from a revlog.
1856
1868
1857 By default, we read every ``-d/--dist`` revision from 0 to tip of
1869 By default, we read every ``-d/--dist`` revision from 0 to tip of
1858 the specified revlog.
1870 the specified revlog.
1859
1871
1860 The start revision can be defined via ``-s/--startrev``.
1872 The start revision can be defined via ``-s/--startrev``.
1861 """
1873 """
1862 opts = _byteskwargs(opts)
1874 opts = _byteskwargs(opts)
1863
1875
1864 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1876 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1865 rllen = getlen(ui)(rl)
1877 rllen = getlen(ui)(rl)
1866
1878
1867 if startrev < 0:
1879 if startrev < 0:
1868 startrev = rllen + startrev
1880 startrev = rllen + startrev
1869
1881
1870 def d():
1882 def d():
1871 rl.clearcaches()
1883 rl.clearcaches()
1872
1884
1873 beginrev = startrev
1885 beginrev = startrev
1874 endrev = rllen
1886 endrev = rllen
1875 dist = opts[b'dist']
1887 dist = opts[b'dist']
1876
1888
1877 if reverse:
1889 if reverse:
1878 beginrev, endrev = endrev - 1, beginrev - 1
1890 beginrev, endrev = endrev - 1, beginrev - 1
1879 dist = -1 * dist
1891 dist = -1 * dist
1880
1892
1881 for x in _xrange(beginrev, endrev, dist):
1893 for x in _xrange(beginrev, endrev, dist):
1882 # Old revisions don't support passing int.
1894 # Old revisions don't support passing int.
1883 n = rl.node(x)
1895 n = rl.node(x)
1884 rl.revision(n)
1896 rl.revision(n)
1885
1897
1886 timer, fm = gettimer(ui, opts)
1898 timer, fm = gettimer(ui, opts)
1887 timer(d)
1899 timer(d)
1888 fm.end()
1900 fm.end()
1889
1901
1890 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1902 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1891 [(b's', b'startrev', 1000, b'revision to start writing at'),
1903 [(b's', b'startrev', 1000, b'revision to start writing at'),
1892 (b'', b'stoprev', -1, b'last revision to write'),
1904 (b'', b'stoprev', -1, b'last revision to write'),
1893 (b'', b'count', 3, b'number of passes to perform'),
1905 (b'', b'count', 3, b'number of passes to perform'),
1894 (b'', b'details', False, b'print timing for every revision tested'),
1906 (b'', b'details', False, b'print timing for every revision tested'),
1895 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
1907 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
1896 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1908 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1897 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1909 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1898 ],
1910 ],
1899 b'-c|-m|FILE')
1911 b'-c|-m|FILE')
1900 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1912 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1901 """Benchmark writing a series of revisions to a revlog.
1913 """Benchmark writing a series of revisions to a revlog.
1902
1914
1903 Possible source values are:
1915 Possible source values are:
1904 * `full`: add from a full text (default).
1916 * `full`: add from a full text (default).
1905 * `parent-1`: add from a delta to the first parent
1917 * `parent-1`: add from a delta to the first parent
1906 * `parent-2`: add from a delta to the second parent if it exists
1918 * `parent-2`: add from a delta to the second parent if it exists
1907 (use a delta from the first parent otherwise)
1919 (use a delta from the first parent otherwise)
1908 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1920 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1909 * `storage`: add from the existing precomputed deltas
1921 * `storage`: add from the existing precomputed deltas
1910 """
1922 """
1911 opts = _byteskwargs(opts)
1923 opts = _byteskwargs(opts)
1912
1924
1913 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1925 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1914 rllen = getlen(ui)(rl)
1926 rllen = getlen(ui)(rl)
1915 if startrev < 0:
1927 if startrev < 0:
1916 startrev = rllen + startrev
1928 startrev = rllen + startrev
1917 if stoprev < 0:
1929 if stoprev < 0:
1918 stoprev = rllen + stoprev
1930 stoprev = rllen + stoprev
1919
1931
1920 lazydeltabase = opts['lazydeltabase']
1932 lazydeltabase = opts['lazydeltabase']
1921 source = opts['source']
1933 source = opts['source']
1922 clearcaches = opts['clear_caches']
1934 clearcaches = opts['clear_caches']
1923 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1935 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1924 b'storage')
1936 b'storage')
1925 if source not in validsource:
1937 if source not in validsource:
1926 raise error.Abort('invalid source type: %s' % source)
1938 raise error.Abort('invalid source type: %s' % source)
1927
1939
1928 ### actually gather results
1940 ### actually gather results
1929 count = opts['count']
1941 count = opts['count']
1930 if count <= 0:
1942 if count <= 0:
1931 raise error.Abort('invalid run count: %d' % count)
1943 raise error.Abort('invalid run count: %d' % count)
1932 allresults = []
1944 allresults = []
1933 for c in range(count):
1945 for c in range(count):
1934 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1946 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1935 lazydeltabase=lazydeltabase,
1947 lazydeltabase=lazydeltabase,
1936 clearcaches=clearcaches)
1948 clearcaches=clearcaches)
1937 allresults.append(timing)
1949 allresults.append(timing)
1938
1950
1939 ### consolidate the results in a single list
1951 ### consolidate the results in a single list
1940 results = []
1952 results = []
1941 for idx, (rev, t) in enumerate(allresults[0]):
1953 for idx, (rev, t) in enumerate(allresults[0]):
1942 ts = [t]
1954 ts = [t]
1943 for other in allresults[1:]:
1955 for other in allresults[1:]:
1944 orev, ot = other[idx]
1956 orev, ot = other[idx]
1945 assert orev == rev
1957 assert orev == rev
1946 ts.append(ot)
1958 ts.append(ot)
1947 results.append((rev, ts))
1959 results.append((rev, ts))
1948 resultcount = len(results)
1960 resultcount = len(results)
1949
1961
1950 ### Compute and display relevant statistics
1962 ### Compute and display relevant statistics
1951
1963
1952 # get a formatter
1964 # get a formatter
1953 fm = ui.formatter(b'perf', opts)
1965 fm = ui.formatter(b'perf', opts)
1954 displayall = ui.configbool(b"perf", b"all-timing", False)
1966 displayall = ui.configbool(b"perf", b"all-timing", False)
1955
1967
1956 # print individual details if requested
1968 # print individual details if requested
1957 if opts['details']:
1969 if opts['details']:
1958 for idx, item in enumerate(results, 1):
1970 for idx, item in enumerate(results, 1):
1959 rev, data = item
1971 rev, data = item
1960 title = 'revision #%d of %d, rev %d' % (idx, resultcount, rev)
1972 title = 'revision #%d of %d, rev %d' % (idx, resultcount, rev)
1961 formatone(fm, data, title=title, displayall=displayall)
1973 formatone(fm, data, title=title, displayall=displayall)
1962
1974
1963 # sorts results by median time
1975 # sorts results by median time
1964 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1976 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1965 # list of (name, index) to display
1977 # list of (name, index) to display
1966 relevants = [
1978 relevants = [
1967 ("min", 0),
1979 ("min", 0),
1968 ("10%", resultcount * 10 // 100),
1980 ("10%", resultcount * 10 // 100),
1969 ("25%", resultcount * 25 // 100),
1981 ("25%", resultcount * 25 // 100),
1970 ("50%", resultcount * 70 // 100),
1982 ("50%", resultcount * 70 // 100),
1971 ("75%", resultcount * 75 // 100),
1983 ("75%", resultcount * 75 // 100),
1972 ("90%", resultcount * 90 // 100),
1984 ("90%", resultcount * 90 // 100),
1973 ("95%", resultcount * 95 // 100),
1985 ("95%", resultcount * 95 // 100),
1974 ("99%", resultcount * 99 // 100),
1986 ("99%", resultcount * 99 // 100),
1975 ("99.9%", resultcount * 999 // 1000),
1987 ("99.9%", resultcount * 999 // 1000),
1976 ("99.99%", resultcount * 9999 // 10000),
1988 ("99.99%", resultcount * 9999 // 10000),
1977 ("99.999%", resultcount * 99999 // 100000),
1989 ("99.999%", resultcount * 99999 // 100000),
1978 ("max", -1),
1990 ("max", -1),
1979 ]
1991 ]
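# Illustrative note: with, say, 200 measured revisions, the "90%" entry picks
# index 200 * 90 // 100 == 180 of the median-sorted results, i.e. a revision
# slower than roughly 90% of the others; "max" (index -1) is the slowest one.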
1980 if not ui.quiet:
1992 if not ui.quiet:
1981 for name, idx in relevants:
1993 for name, idx in relevants:
1982 data = results[idx]
1994 data = results[idx]
1983 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1995 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1984 formatone(fm, data[1], title=title, displayall=displayall)
1996 formatone(fm, data[1], title=title, displayall=displayall)
1985
1997
1986 # XXX summing that many floats will not be very precise, we ignore this fact
1998 # XXX summing that many floats will not be very precise, we ignore this fact
1987 # for now
1999 # for now
1988 totaltime = []
2000 totaltime = []
1989 for item in allresults:
2001 for item in allresults:
1990 totaltime.append((sum(x[1][0] for x in item),
2002 totaltime.append((sum(x[1][0] for x in item),
1991 sum(x[1][1] for x in item),
2003 sum(x[1][1] for x in item),
1992 sum(x[1][2] for x in item),)
2004 sum(x[1][2] for x in item),)
1993 )
2005 )
1994 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
2006 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1995 displayall=displayall)
2007 displayall=displayall)
1996 fm.end()
2008 fm.end()
1997
2009
1998 class _faketr(object):
2010 class _faketr(object):
1999 def add(s, x, y, z=None):
2011 def add(s, x, y, z=None):
2000 return None
2012 return None
2001
2013
2002 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
2014 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
2003 lazydeltabase=True, clearcaches=True):
2015 lazydeltabase=True, clearcaches=True):
2004 timings = []
2016 timings = []
2005 tr = _faketr()
2017 tr = _faketr()
2006 with _temprevlog(ui, orig, startrev) as dest:
2018 with _temprevlog(ui, orig, startrev) as dest:
2007 dest._lazydeltabase = lazydeltabase
2019 dest._lazydeltabase = lazydeltabase
2008 revs = list(orig.revs(startrev, stoprev))
2020 revs = list(orig.revs(startrev, stoprev))
2009 total = len(revs)
2021 total = len(revs)
2010 topic = 'adding'
2022 topic = 'adding'
2011 if runidx is not None:
2023 if runidx is not None:
2012 topic += ' (run #%d)' % runidx
2024 topic += ' (run #%d)' % runidx
2013 # Support both old and new progress API
2025 # Support both old and new progress API
2014 if util.safehasattr(ui, 'makeprogress'):
2026 if util.safehasattr(ui, 'makeprogress'):
2015 progress = ui.makeprogress(topic, unit='revs', total=total)
2027 progress = ui.makeprogress(topic, unit='revs', total=total)
2016 def updateprogress(pos):
2028 def updateprogress(pos):
2017 progress.update(pos)
2029 progress.update(pos)
2018 def completeprogress():
2030 def completeprogress():
2019 progress.complete()
2031 progress.complete()
2020 else:
2032 else:
2021 def updateprogress(pos):
2033 def updateprogress(pos):
2022 ui.progress(topic, pos, unit='revs', total=total)
2034 ui.progress(topic, pos, unit='revs', total=total)
2023 def completeprogress():
2035 def completeprogress():
2024 ui.progress(topic, None, unit='revs', total=total)
2036 ui.progress(topic, None, unit='revs', total=total)
2025
2037
2026 for idx, rev in enumerate(revs):
2038 for idx, rev in enumerate(revs):
2027 updateprogress(idx)
2039 updateprogress(idx)
2028 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
2040 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
2029 if clearcaches:
2041 if clearcaches:
2030 dest.index.clearcaches()
2042 dest.index.clearcaches()
2031 dest.clearcaches()
2043 dest.clearcaches()
2032 with timeone() as r:
2044 with timeone() as r:
2033 dest.addrawrevision(*addargs, **addkwargs)
2045 dest.addrawrevision(*addargs, **addkwargs)
2034 timings.append((rev, r[0]))
2046 timings.append((rev, r[0]))
2035 updateprogress(total)
2047 updateprogress(total)
2036 completeprogress()
2048 completeprogress()
2037 return timings
2049 return timings
2038
2050
2039 def _getrevisionseed(orig, rev, tr, source):
2051 def _getrevisionseed(orig, rev, tr, source):
2040 from mercurial.node import nullid
2052 from mercurial.node import nullid
2041
2053
2042 linkrev = orig.linkrev(rev)
2054 linkrev = orig.linkrev(rev)
2043 node = orig.node(rev)
2055 node = orig.node(rev)
2044 p1, p2 = orig.parents(node)
2056 p1, p2 = orig.parents(node)
2045 flags = orig.flags(rev)
2057 flags = orig.flags(rev)
2046 cachedelta = None
2058 cachedelta = None
2047 text = None
2059 text = None
2048
2060
2049 if source == b'full':
2061 if source == b'full':
2050 text = orig.revision(rev)
2062 text = orig.revision(rev)
2051 elif source == b'parent-1':
2063 elif source == b'parent-1':
2052 baserev = orig.rev(p1)
2064 baserev = orig.rev(p1)
2053 cachedelta = (baserev, orig.revdiff(p1, rev))
2065 cachedelta = (baserev, orig.revdiff(p1, rev))
2054 elif source == b'parent-2':
2066 elif source == b'parent-2':
2055 parent = p2
2067 parent = p2
2056 if p2 == nullid:
2068 if p2 == nullid:
2057 parent = p1
2069 parent = p1
2058 baserev = orig.rev(parent)
2070 baserev = orig.rev(parent)
2059 cachedelta = (baserev, orig.revdiff(parent, rev))
2071 cachedelta = (baserev, orig.revdiff(parent, rev))
2060 elif source == b'parent-smallest':
2072 elif source == b'parent-smallest':
2061 p1diff = orig.revdiff(p1, rev)
2073 p1diff = orig.revdiff(p1, rev)
2062 parent = p1
2074 parent = p1
2063 diff = p1diff
2075 diff = p1diff
2064 if p2 != nullid:
2076 if p2 != nullid:
2065 p2diff = orig.revdiff(p2, rev)
2077 p2diff = orig.revdiff(p2, rev)
2066 if len(p1diff) > len(p2diff):
2078 if len(p1diff) > len(p2diff):
2067 parent = p2
2079 parent = p2
2068 diff = p2diff
2080 diff = p2diff
2069 baserev = orig.rev(parent)
2081 baserev = orig.rev(parent)
2070 cachedelta = (baserev, diff)
2082 cachedelta = (baserev, diff)
2071 elif source == b'storage':
2083 elif source == b'storage':
2072 baserev = orig.deltaparent(rev)
2084 baserev = orig.deltaparent(rev)
2073 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
2085 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
2074
2086
2075 return ((text, tr, linkrev, p1, p2),
2087 return ((text, tr, linkrev, p1, p2),
2076 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2088 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2077
2089
2078 @contextlib.contextmanager
2090 @contextlib.contextmanager
2079 def _temprevlog(ui, orig, truncaterev):
2091 def _temprevlog(ui, orig, truncaterev):
2080 from mercurial import vfs as vfsmod
2092 from mercurial import vfs as vfsmod
2081
2093
2082 if orig._inline:
2094 if orig._inline:
2083 raise error.Abort('not supporting inline revlog (yet)')
2095 raise error.Abort('not supporting inline revlog (yet)')
2084
2096
2085 origindexpath = orig.opener.join(orig.indexfile)
2097 origindexpath = orig.opener.join(orig.indexfile)
2086 origdatapath = orig.opener.join(orig.datafile)
2098 origdatapath = orig.opener.join(orig.datafile)
2087 indexname = 'revlog.i'
2099 indexname = 'revlog.i'
2088 dataname = 'revlog.d'
2100 dataname = 'revlog.d'
2089
2101
2090 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2102 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2091 try:
2103 try:
2092 # copy the data file in a temporary directory
2104 # copy the data file in a temporary directory
2093 ui.debug('copying data in %s\n' % tmpdir)
2105 ui.debug('copying data in %s\n' % tmpdir)
2094 destindexpath = os.path.join(tmpdir, 'revlog.i')
2106 destindexpath = os.path.join(tmpdir, 'revlog.i')
2095 destdatapath = os.path.join(tmpdir, 'revlog.d')
2107 destdatapath = os.path.join(tmpdir, 'revlog.d')
2096 shutil.copyfile(origindexpath, destindexpath)
2108 shutil.copyfile(origindexpath, destindexpath)
2097 shutil.copyfile(origdatapath, destdatapath)
2109 shutil.copyfile(origdatapath, destdatapath)
2098
2110
2099 # remove the data we want to add again
2111 # remove the data we want to add again
2100 ui.debug('truncating data to be rewritten\n')
2112 ui.debug('truncating data to be rewritten\n')
2101 with open(destindexpath, 'ab') as index:
2113 with open(destindexpath, 'ab') as index:
2102 index.seek(0)
2114 index.seek(0)
2103 index.truncate(truncaterev * orig._io.size)
2115 index.truncate(truncaterev * orig._io.size)
2104 with open(destdatapath, 'ab') as data:
2116 with open(destdatapath, 'ab') as data:
2105 data.seek(0)
2117 data.seek(0)
2106 data.truncate(orig.start(truncaterev))
2118 data.truncate(orig.start(truncaterev))
2107
2119
2108 # instantiate a new revlog from the temporary copy
2120 # instantiate a new revlog from the temporary copy
2109 ui.debug('instantiating revlog from the truncated copy\n')
2121 ui.debug('instantiating revlog from the truncated copy\n')
2110 vfs = vfsmod.vfs(tmpdir)
2122 vfs = vfsmod.vfs(tmpdir)
2111 vfs.options = getattr(orig.opener, 'options', None)
2123 vfs.options = getattr(orig.opener, 'options', None)
2112
2124
2113 dest = revlog.revlog(vfs,
2125 dest = revlog.revlog(vfs,
2114 indexfile=indexname,
2126 indexfile=indexname,
2115 datafile=dataname)
2127 datafile=dataname)
2116 if dest._inline:
2128 if dest._inline:
2117 raise error.Abort('not supporting inline revlog (yet)')
2129 raise error.Abort('not supporting inline revlog (yet)')
2118 # make sure internals are initialized
2130 # make sure internals are initialized
2119 dest.revision(len(dest) - 1)
2131 dest.revision(len(dest) - 1)
2120 yield dest
2132 yield dest
2121 del dest, vfs
2133 del dest, vfs
2122 finally:
2134 finally:
2123 shutil.rmtree(tmpdir, True)
2135 shutil.rmtree(tmpdir, True)
2124
2136
2125 @command(b'perfrevlogchunks', revlogopts + formatteropts +
2137 @command(b'perfrevlogchunks', revlogopts + formatteropts +
2126 [(b'e', b'engines', b'', b'compression engines to use'),
2138 [(b'e', b'engines', b'', b'compression engines to use'),
2127 (b's', b'startrev', 0, b'revision to start at')],
2139 (b's', b'startrev', 0, b'revision to start at')],
2128 b'-c|-m|FILE')
2140 b'-c|-m|FILE')
2129 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2141 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2130 """Benchmark operations on revlog chunks.
2142 """Benchmark operations on revlog chunks.
2131
2143
2132 Logically, each revlog is a collection of fulltext revisions. However,
2144 Logically, each revlog is a collection of fulltext revisions. However,
2133 stored within each revlog are "chunks" of possibly compressed data. This
2145 stored within each revlog are "chunks" of possibly compressed data. This
2134 data needs to be read and decompressed or compressed and written.
2146 data needs to be read and decompressed or compressed and written.
2135
2147
2136 This command measures the time it takes to read+decompress and recompress
2148 This command measures the time it takes to read+decompress and recompress
2137 chunks in a revlog. It effectively isolates I/O and compression performance.
2149 chunks in a revlog. It effectively isolates I/O and compression performance.
2138 For measurements of higher-level operations like resolving revisions,
2150 For measurements of higher-level operations like resolving revisions,
2139 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2151 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2140 """
2152 """
2141 opts = _byteskwargs(opts)
2153 opts = _byteskwargs(opts)
2142
2154
2143 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2155 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2144
2156
2145 # _chunkraw was renamed to _getsegmentforrevs.
2157 # _chunkraw was renamed to _getsegmentforrevs.
2146 try:
2158 try:
2147 segmentforrevs = rl._getsegmentforrevs
2159 segmentforrevs = rl._getsegmentforrevs
2148 except AttributeError:
2160 except AttributeError:
2149 segmentforrevs = rl._chunkraw
2161 segmentforrevs = rl._chunkraw
2150
2162
2151 # Verify engines argument.
2163 # Verify engines argument.
2152 if engines:
2164 if engines:
2153 engines = set(e.strip() for e in engines.split(b','))
2165 engines = set(e.strip() for e in engines.split(b','))
2154 for engine in engines:
2166 for engine in engines:
2155 try:
2167 try:
2156 util.compressionengines[engine]
2168 util.compressionengines[engine]
2157 except KeyError:
2169 except KeyError:
2158 raise error.Abort(b'unknown compression engine: %s' % engine)
2170 raise error.Abort(b'unknown compression engine: %s' % engine)
2159 else:
2171 else:
2160 engines = []
2172 engines = []
2161 for e in util.compengines:
2173 for e in util.compengines:
2162 engine = util.compengines[e]
2174 engine = util.compengines[e]
2163 try:
2175 try:
2164 if engine.available():
2176 if engine.available():
2165 engine.revlogcompressor().compress(b'dummy')
2177 engine.revlogcompressor().compress(b'dummy')
2166 engines.append(e)
2178 engines.append(e)
2167 except NotImplementedError:
2179 except NotImplementedError:
2168 pass
2180 pass
2169
2181
2170 revs = list(rl.revs(startrev, len(rl) - 1))
2182 revs = list(rl.revs(startrev, len(rl) - 1))
2171
2183
2172 def rlfh(rl):
2184 def rlfh(rl):
2173 if rl._inline:
2185 if rl._inline:
2174 return getsvfs(repo)(rl.indexfile)
2186 return getsvfs(repo)(rl.indexfile)
2175 else:
2187 else:
2176 return getsvfs(repo)(rl.datafile)
2188 return getsvfs(repo)(rl.datafile)
2177
2189
2178 def doread():
2190 def doread():
2179 rl.clearcaches()
2191 rl.clearcaches()
2180 for rev in revs:
2192 for rev in revs:
2181 segmentforrevs(rev, rev)
2193 segmentforrevs(rev, rev)
2182
2194
2183 def doreadcachedfh():
2195 def doreadcachedfh():
2184 rl.clearcaches()
2196 rl.clearcaches()
2185 fh = rlfh(rl)
2197 fh = rlfh(rl)
2186 for rev in revs:
2198 for rev in revs:
2187 segmentforrevs(rev, rev, df=fh)
2199 segmentforrevs(rev, rev, df=fh)
2188
2200
2189 def doreadbatch():
2201 def doreadbatch():
2190 rl.clearcaches()
2202 rl.clearcaches()
2191 segmentforrevs(revs[0], revs[-1])
2203 segmentforrevs(revs[0], revs[-1])
2192
2204
2193 def doreadbatchcachedfh():
2205 def doreadbatchcachedfh():
2194 rl.clearcaches()
2206 rl.clearcaches()
2195 fh = rlfh(rl)
2207 fh = rlfh(rl)
2196 segmentforrevs(revs[0], revs[-1], df=fh)
2208 segmentforrevs(revs[0], revs[-1], df=fh)
2197
2209
2198 def dochunk():
2210 def dochunk():
2199 rl.clearcaches()
2211 rl.clearcaches()
2200 fh = rlfh(rl)
2212 fh = rlfh(rl)
2201 for rev in revs:
2213 for rev in revs:
2202 rl._chunk(rev, df=fh)
2214 rl._chunk(rev, df=fh)
2203
2215
2204 chunks = [None]
2216 chunks = [None]
2205
2217
2206 def dochunkbatch():
2218 def dochunkbatch():
2207 rl.clearcaches()
2219 rl.clearcaches()
2208 fh = rlfh(rl)
2220 fh = rlfh(rl)
2209 # Save chunks as a side-effect.
2221 # Save chunks as a side-effect.
2210 chunks[0] = rl._chunks(revs, df=fh)
2222 chunks[0] = rl._chunks(revs, df=fh)
2211
2223
2212 def docompress(compressor):
2224 def docompress(compressor):
2213 rl.clearcaches()
2225 rl.clearcaches()
2214
2226
2215 try:
2227 try:
2216 # Swap in the requested compression engine.
2228 # Swap in the requested compression engine.
2217 oldcompressor = rl._compressor
2229 oldcompressor = rl._compressor
2218 rl._compressor = compressor
2230 rl._compressor = compressor
2219 for chunk in chunks[0]:
2231 for chunk in chunks[0]:
2220 rl.compress(chunk)
2232 rl.compress(chunk)
2221 finally:
2233 finally:
2222 rl._compressor = oldcompressor
2234 rl._compressor = oldcompressor
2223
2235
2224 benches = [
2236 benches = [
2225 (lambda: doread(), b'read'),
2237 (lambda: doread(), b'read'),
2226 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2238 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2227 (lambda: doreadbatch(), b'read batch'),
2239 (lambda: doreadbatch(), b'read batch'),
2228 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2240 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2229 (lambda: dochunk(), b'chunk'),
2241 (lambda: dochunk(), b'chunk'),
2230 (lambda: dochunkbatch(), b'chunk batch'),
2242 (lambda: dochunkbatch(), b'chunk batch'),
2231 ]
2243 ]
2232
2244
2233 for engine in sorted(engines):
2245 for engine in sorted(engines):
2234 compressor = util.compengines[engine].revlogcompressor()
2246 compressor = util.compengines[engine].revlogcompressor()
2235 benches.append((functools.partial(docompress, compressor),
2247 benches.append((functools.partial(docompress, compressor),
2236 b'compress w/ %s' % engine))
2248 b'compress w/ %s' % engine))
2237
2249
2238 for fn, title in benches:
2250 for fn, title in benches:
2239 timer, fm = gettimer(ui, opts)
2251 timer, fm = gettimer(ui, opts)
2240 timer(fn, title=title)
2252 timer(fn, title=title)
2241 fm.end()
2253 fm.end()
2242
2254
2243 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2255 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2244 [(b'', b'cache', False, b'use caches instead of clearing')],
2256 [(b'', b'cache', False, b'use caches instead of clearing')],
2245 b'-c|-m|FILE REV')
2257 b'-c|-m|FILE REV')
2246 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2258 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2247 """Benchmark obtaining a revlog revision.
2259 """Benchmark obtaining a revlog revision.
2248
2260
2249 Obtaining a revlog revision consists of roughly the following steps:
2261 Obtaining a revlog revision consists of roughly the following steps:
2250
2262
2251 1. Compute the delta chain
2263 1. Compute the delta chain
2252 2. Slice the delta chain if applicable
2264 2. Slice the delta chain if applicable
2253 3. Obtain the raw chunks for that delta chain
2265 3. Obtain the raw chunks for that delta chain
2254 4. Decompress each raw chunk
2266 4. Decompress each raw chunk
2255 5. Apply binary patches to obtain fulltext
2267 5. Apply binary patches to obtain fulltext
2256 6. Verify hash of fulltext
2268 6. Verify hash of fulltext
2257
2269
2258 This command measures the time spent in each of these phases.
2270 This command measures the time spent in each of these phases.
2259 """
2271 """
2260 opts = _byteskwargs(opts)
2272 opts = _byteskwargs(opts)
2261
2273
2262 if opts.get(b'changelog') or opts.get(b'manifest'):
2274 if opts.get(b'changelog') or opts.get(b'manifest'):
2263 file_, rev = None, file_
2275 file_, rev = None, file_
2264 elif rev is None:
2276 elif rev is None:
2265 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2277 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2266
2278
2267 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2279 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2268
2280
2269 # _chunkraw was renamed to _getsegmentforrevs.
2281 # _chunkraw was renamed to _getsegmentforrevs.
2270 try:
2282 try:
2271 segmentforrevs = r._getsegmentforrevs
2283 segmentforrevs = r._getsegmentforrevs
2272 except AttributeError:
2284 except AttributeError:
2273 segmentforrevs = r._chunkraw
2285 segmentforrevs = r._chunkraw
2274
2286
2275 node = r.lookup(rev)
2287 node = r.lookup(rev)
2276 rev = r.rev(node)
2288 rev = r.rev(node)
2277
2289
2278 def getrawchunks(data, chain):
2290 def getrawchunks(data, chain):
2279 start = r.start
2291 start = r.start
2280 length = r.length
2292 length = r.length
2281 inline = r._inline
2293 inline = r._inline
2282 iosize = r._io.size
2294 iosize = r._io.size
2283 buffer = util.buffer
2295 buffer = util.buffer
2284
2296
2285 chunks = []
2297 chunks = []
2286 ladd = chunks.append
2298 ladd = chunks.append
2287 for idx, item in enumerate(chain):
2299 for idx, item in enumerate(chain):
2288 offset = start(item[0])
2300 offset = start(item[0])
2289 bits = data[idx]
2301 bits = data[idx]
2290 for rev in item:
2302 for rev in item:
2291 chunkstart = start(rev)
2303 chunkstart = start(rev)
2292 if inline:
2304 if inline:
2293 chunkstart += (rev + 1) * iosize
2305 chunkstart += (rev + 1) * iosize
2294 chunklength = length(rev)
2306 chunklength = length(rev)
2295 ladd(buffer(bits, chunkstart - offset, chunklength))
2307 ladd(buffer(bits, chunkstart - offset, chunklength))
2296
2308
2297 return chunks
2309 return chunks
2298
2310
2299 def dodeltachain(rev):
2311 def dodeltachain(rev):
2300 if not cache:
2312 if not cache:
2301 r.clearcaches()
2313 r.clearcaches()
2302 r._deltachain(rev)
2314 r._deltachain(rev)
2303
2315
2304 def doread(chain):
2316 def doread(chain):
2305 if not cache:
2317 if not cache:
2306 r.clearcaches()
2318 r.clearcaches()
2307 for item in slicedchain:
2319 for item in slicedchain:
2308 segmentforrevs(item[0], item[-1])
2320 segmentforrevs(item[0], item[-1])
2309
2321
2310 def doslice(r, chain, size):
2322 def doslice(r, chain, size):
2311 for s in slicechunk(r, chain, targetsize=size):
2323 for s in slicechunk(r, chain, targetsize=size):
2312 pass
2324 pass
2313
2325
2314 def dorawchunks(data, chain):
2326 def dorawchunks(data, chain):
2315 if not cache:
2327 if not cache:
2316 r.clearcaches()
2328 r.clearcaches()
2317 getrawchunks(data, chain)
2329 getrawchunks(data, chain)
2318
2330
2319 def dodecompress(chunks):
2331 def dodecompress(chunks):
2320 decomp = r.decompress
2332 decomp = r.decompress
2321 for chunk in chunks:
2333 for chunk in chunks:
2322 decomp(chunk)
2334 decomp(chunk)
2323
2335
2324 def dopatch(text, bins):
2336 def dopatch(text, bins):
2325 if not cache:
2337 if not cache:
2326 r.clearcaches()
2338 r.clearcaches()
2327 mdiff.patches(text, bins)
2339 mdiff.patches(text, bins)
2328
2340
2329 def dohash(text):
2341 def dohash(text):
2330 if not cache:
2342 if not cache:
2331 r.clearcaches()
2343 r.clearcaches()
2332 r.checkhash(text, node, rev=rev)
2344 r.checkhash(text, node, rev=rev)
2333
2345
2334 def dorevision():
2346 def dorevision():
2335 if not cache:
2347 if not cache:
2336 r.clearcaches()
2348 r.clearcaches()
2337 r.revision(node)
2349 r.revision(node)
2338
2350
2339 try:
2351 try:
2340 from mercurial.revlogutils.deltas import slicechunk
2352 from mercurial.revlogutils.deltas import slicechunk
2341 except ImportError:
2353 except ImportError:
2342 slicechunk = getattr(revlog, '_slicechunk', None)
2354 slicechunk = getattr(revlog, '_slicechunk', None)
2343
2355
2344 size = r.length(rev)
2356 size = r.length(rev)
2345 chain = r._deltachain(rev)[0]
2357 chain = r._deltachain(rev)[0]
2346 if not getattr(r, '_withsparseread', False):
2358 if not getattr(r, '_withsparseread', False):
2347 slicedchain = (chain,)
2359 slicedchain = (chain,)
2348 else:
2360 else:
2349 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2361 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2350 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2362 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2351 rawchunks = getrawchunks(data, slicedchain)
2363 rawchunks = getrawchunks(data, slicedchain)
2352 bins = r._chunks(chain)
2364 bins = r._chunks(chain)
2353 text = bytes(bins[0])
2365 text = bytes(bins[0])
2354 bins = bins[1:]
2366 bins = bins[1:]
2355 text = mdiff.patches(text, bins)
2367 text = mdiff.patches(text, bins)
2356
2368
2357 benches = [
2369 benches = [
2358 (lambda: dorevision(), b'full'),
2370 (lambda: dorevision(), b'full'),
2359 (lambda: dodeltachain(rev), b'deltachain'),
2371 (lambda: dodeltachain(rev), b'deltachain'),
2360 (lambda: doread(chain), b'read'),
2372 (lambda: doread(chain), b'read'),
2361 ]
2373 ]
2362
2374
2363 if getattr(r, '_withsparseread', False):
2375 if getattr(r, '_withsparseread', False):
2364 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2376 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2365 benches.append(slicing)
2377 benches.append(slicing)
2366
2378
2367 benches.extend([
2379 benches.extend([
2368 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2380 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2369 (lambda: dodecompress(rawchunks), b'decompress'),
2381 (lambda: dodecompress(rawchunks), b'decompress'),
2370 (lambda: dopatch(text, bins), b'patch'),
2382 (lambda: dopatch(text, bins), b'patch'),
2371 (lambda: dohash(text), b'hash'),
2383 (lambda: dohash(text), b'hash'),
2372 ])
2384 ])
2373
2385
2374 timer, fm = gettimer(ui, opts)
2386 timer, fm = gettimer(ui, opts)
2375 for fn, title in benches:
2387 for fn, title in benches:
2376 timer(fn, title=title)
2388 timer(fn, title=title)
2377 fm.end()
2389 fm.end()
2378
2390
2379 @command(b'perfrevset',
2391 @command(b'perfrevset',
2380 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2392 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2381 (b'', b'contexts', False, b'obtain changectx for each revision')]
2393 (b'', b'contexts', False, b'obtain changectx for each revision')]
2382 + formatteropts, b"REVSET")
2394 + formatteropts, b"REVSET")
2383 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2395 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2384 """benchmark the execution time of a revset
2396 """benchmark the execution time of a revset
2385
2397
2386 Use the --clear option if you need to evaluate the impact of building the
2398 Use the --clear option if you need to evaluate the impact of building the
2387 volatile revision set caches on revset execution. The volatile caches hold
2399 volatile revision set caches on revset execution. The volatile caches hold
2388 filtered and obsolescence related data."""
2400 filtered and obsolescence related data."""
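# Illustrative invocations (a sketch only; the revsets are arbitrary examples):
#   $ hg perfrevset 'draft()'
#   $ hg perfrevset --clear --contexts 'heads(all())'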
2389 opts = _byteskwargs(opts)
2401 opts = _byteskwargs(opts)
2390
2402
2391 timer, fm = gettimer(ui, opts)
2403 timer, fm = gettimer(ui, opts)
2392 def d():
2404 def d():
2393 if clear:
2405 if clear:
2394 repo.invalidatevolatilesets()
2406 repo.invalidatevolatilesets()
2395 if contexts:
2407 if contexts:
2396 for ctx in repo.set(expr): pass
2408 for ctx in repo.set(expr): pass
2397 else:
2409 else:
2398 for r in repo.revs(expr): pass
2410 for r in repo.revs(expr): pass
2399 timer(d)
2411 timer(d)
2400 fm.end()
2412 fm.end()
2401
2413
2402 @command(b'perfvolatilesets',
2414 @command(b'perfvolatilesets',
2403 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2415 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2404 ] + formatteropts)
2416 ] + formatteropts)
2405 def perfvolatilesets(ui, repo, *names, **opts):
2417 def perfvolatilesets(ui, repo, *names, **opts):
2406 """benchmark the computation of various volatile set
2418 """benchmark the computation of various volatile set
2407
2419
2408 Volatile sets compute elements related to filtering and obsolescence."""
2420 Volatile sets compute elements related to filtering and obsolescence."""
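# Illustrative invocation (a sketch only; names must match keys of
# obsolete.cachefuncs or repoview.filtertable, e.g. 'obsolete'):
#   $ hg perfvolatilesets --clear-obsstore obsolete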
2409 opts = _byteskwargs(opts)
2421 opts = _byteskwargs(opts)
2410 timer, fm = gettimer(ui, opts)
2422 timer, fm = gettimer(ui, opts)
2411 repo = repo.unfiltered()
2423 repo = repo.unfiltered()
2412
2424
2413 def getobs(name):
2425 def getobs(name):
2414 def d():
2426 def d():
2415 repo.invalidatevolatilesets()
2427 repo.invalidatevolatilesets()
2416 if opts[b'clear_obsstore']:
2428 if opts[b'clear_obsstore']:
2417 clearfilecache(repo, b'obsstore')
2429 clearfilecache(repo, b'obsstore')
2418 obsolete.getrevs(repo, name)
2430 obsolete.getrevs(repo, name)
2419 return d
2431 return d
2420
2432
2421 allobs = sorted(obsolete.cachefuncs)
2433 allobs = sorted(obsolete.cachefuncs)
2422 if names:
2434 if names:
2423 allobs = [n for n in allobs if n in names]
2435 allobs = [n for n in allobs if n in names]
2424
2436
2425 for name in allobs:
2437 for name in allobs:
2426 timer(getobs(name), title=name)
2438 timer(getobs(name), title=name)
2427
2439
2428 def getfiltered(name):
2440 def getfiltered(name):
2429 def d():
2441 def d():
2430 repo.invalidatevolatilesets()
2442 repo.invalidatevolatilesets()
2431 if opts[b'clear_obsstore']:
2443 if opts[b'clear_obsstore']:
2432 clearfilecache(repo, b'obsstore')
2444 clearfilecache(repo, b'obsstore')
2433 repoview.filterrevs(repo, name)
2445 repoview.filterrevs(repo, name)
2434 return d
2446 return d
2435
2447
2436 allfilter = sorted(repoview.filtertable)
2448 allfilter = sorted(repoview.filtertable)
2437 if names:
2449 if names:
2438 allfilter = [n for n in allfilter if n in names]
2450 allfilter = [n for n in allfilter if n in names]
2439
2451
2440 for name in allfilter:
2452 for name in allfilter:
2441 timer(getfiltered(name), title=name)
2453 timer(getfiltered(name), title=name)
2442 fm.end()
2454 fm.end()
2443
2455
2444 @command(b'perfbranchmap',
2456 @command(b'perfbranchmap',
2445 [(b'f', b'full', False,
2457 [(b'f', b'full', False,
2446 b'Includes build time of subset'),
2458 b'Includes build time of subset'),
2447 (b'', b'clear-revbranch', False,
2459 (b'', b'clear-revbranch', False,
2448 b'purge the revbranch cache between computation'),
2460 b'purge the revbranch cache between computation'),
2449 ] + formatteropts)
2461 ] + formatteropts)
2450 def perfbranchmap(ui, repo, *filternames, **opts):
2462 def perfbranchmap(ui, repo, *filternames, **opts):
2451 """benchmark the update of a branchmap
2463 """benchmark the update of a branchmap
2452
2464
2453 This benchmarks the full repo.branchmap() call with read and write disabled
2465 This benchmarks the full repo.branchmap() call with read and write disabled
2454 """
2466 """
2455 opts = _byteskwargs(opts)
2467 opts = _byteskwargs(opts)
2456 full = opts.get(b"full", False)
2468 full = opts.get(b"full", False)
2457 clear_revbranch = opts.get(b"clear_revbranch", False)
2469 clear_revbranch = opts.get(b"clear_revbranch", False)
2458 timer, fm = gettimer(ui, opts)
2470 timer, fm = gettimer(ui, opts)
2459 def getbranchmap(filtername):
2471 def getbranchmap(filtername):
2460 """generate a benchmark function for the filtername"""
2472 """generate a benchmark function for the filtername"""
2461 if filtername is None:
2473 if filtername is None:
2462 view = repo
2474 view = repo
2463 else:
2475 else:
2464 view = repo.filtered(filtername)
2476 view = repo.filtered(filtername)
2465 if util.safehasattr(view._branchcaches, '_per_filter'):
2477 if util.safehasattr(view._branchcaches, '_per_filter'):
2466 filtered = view._branchcaches._per_filter
2478 filtered = view._branchcaches._per_filter
2467 else:
2479 else:
2468 # older versions
2480 # older versions
2469 filtered = view._branchcaches
2481 filtered = view._branchcaches
2470 def d():
2482 def d():
2471 if clear_revbranch:
2483 if clear_revbranch:
2472 repo.revbranchcache()._clear()
2484 repo.revbranchcache()._clear()
2473 if full:
2485 if full:
2474 view._branchcaches.clear()
2486 view._branchcaches.clear()
2475 else:
2487 else:
2476 filtered.pop(filtername, None)
2488 filtered.pop(filtername, None)
2477 view.branchmap()
2489 view.branchmap()
2478 return d
2490 return d
2479 # order filters from smaller subsets to bigger subsets
2491 # order filters from smaller subsets to bigger subsets
2480 possiblefilters = set(repoview.filtertable)
2492 possiblefilters = set(repoview.filtertable)
2481 if filternames:
2493 if filternames:
2482 possiblefilters &= set(filternames)
2494 possiblefilters &= set(filternames)
2483 subsettable = getbranchmapsubsettable()
2495 subsettable = getbranchmapsubsettable()
2484 allfilters = []
2496 allfilters = []
2485 while possiblefilters:
2497 while possiblefilters:
2486 for name in possiblefilters:
2498 for name in possiblefilters:
2487 subset = subsettable.get(name)
2499 subset = subsettable.get(name)
2488 if subset not in possiblefilters:
2500 if subset not in possiblefilters:
2489 break
2501 break
2490 else:
2502 else:
2491 assert False, b'subset cycle %s!' % possiblefilters
2503 assert False, b'subset cycle %s!' % possiblefilters
2492 allfilters.append(name)
2504 allfilters.append(name)
2493 possiblefilters.remove(name)
2505 possiblefilters.remove(name)
2494
2506
2495 # warm the cache
2507 # warm the cache
2496 if not full:
2508 if not full:
2497 for name in allfilters:
2509 for name in allfilters:
2498 repo.filtered(name).branchmap()
2510 repo.filtered(name).branchmap()
2499 if not filternames or b'unfiltered' in filternames:
2511 if not filternames or b'unfiltered' in filternames:
2500 # add unfiltered
2512 # add unfiltered
2501 allfilters.append(None)
2513 allfilters.append(None)
2502
2514
2503 if util.safehasattr(branchmap.branchcache, 'fromfile'):
2515 if util.safehasattr(branchmap.branchcache, 'fromfile'):
2504 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
2516 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
2505 branchcacheread.set(classmethod(lambda *args: None))
2517 branchcacheread.set(classmethod(lambda *args: None))
2506 else:
2518 else:
2507 # older versions
2519 # older versions
2508 branchcacheread = safeattrsetter(branchmap, b'read')
2520 branchcacheread = safeattrsetter(branchmap, b'read')
2509 branchcacheread.set(lambda *args: None)
2521 branchcacheread.set(lambda *args: None)
2510 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2522 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2511 branchcachewrite.set(lambda *args: None)
2523 branchcachewrite.set(lambda *args: None)
2512 try:
2524 try:
2513 for name in allfilters:
2525 for name in allfilters:
2514 printname = name
2526 printname = name
2515 if name is None:
2527 if name is None:
2516 printname = b'unfiltered'
2528 printname = b'unfiltered'
2517 timer(getbranchmap(name), title=str(printname))
2529 timer(getbranchmap(name), title=str(printname))
2518 finally:
2530 finally:
2519 branchcacheread.restore()
2531 branchcacheread.restore()
2520 branchcachewrite.restore()
2532 branchcachewrite.restore()
2521 fm.end()
2533 fm.end()
2522
2534
2523 @command(b'perfbranchmapupdate', [
2535 @command(b'perfbranchmapupdate', [
2524 (b'', b'base', [], b'subset of revision to start from'),
2536 (b'', b'base', [], b'subset of revision to start from'),
2525 (b'', b'target', [], b'subset of revision to end with'),
2537 (b'', b'target', [], b'subset of revision to end with'),
2526 (b'', b'clear-caches', False, b'clear caches between each run')
2538 (b'', b'clear-caches', False, b'clear caches between each run')
2527 ] + formatteropts)
2539 ] + formatteropts)
2528 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2540 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2529 """benchmark branchmap update from for <base> revs to <target> revs
2541 """benchmark branchmap update from for <base> revs to <target> revs
2530
2542
2531 If `--clear-caches` is passed, the following items will be reset before
2543 If `--clear-caches` is passed, the following items will be reset before
2532 each update:
2544 each update:
2533 * the changelog instance and associated indexes
2545 * the changelog instance and associated indexes
2534 * the rev-branch-cache instance
2546 * the rev-branch-cache instance
2535
2547
2536 Examples:
2548 Examples:
2537
2549
2538 # update for the last revision only
2550 # update for the last revision only
2539 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2551 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2540
2552
2541 # update for a change coming with a new branch
2553 # update for a change coming with a new branch
2542 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2554 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2543 """
2555 """
2544 from mercurial import branchmap
2556 from mercurial import branchmap
2545 from mercurial import repoview
2557 from mercurial import repoview
2546 opts = _byteskwargs(opts)
2558 opts = _byteskwargs(opts)
2547 timer, fm = gettimer(ui, opts)
2559 timer, fm = gettimer(ui, opts)
2548 clearcaches = opts[b'clear_caches']
2560 clearcaches = opts[b'clear_caches']
2549 unfi = repo.unfiltered()
2561 unfi = repo.unfiltered()
2550 x = [None] # used to pass data between closures
2562 x = [None] # used to pass data between closures
2551
2563
2552 # we use a `list` here to avoid possible side effect from smartset
2564 # we use a `list` here to avoid possible side effect from smartset
2553 baserevs = list(scmutil.revrange(repo, base))
2565 baserevs = list(scmutil.revrange(repo, base))
2554 targetrevs = list(scmutil.revrange(repo, target))
2566 targetrevs = list(scmutil.revrange(repo, target))
2555 if not baserevs:
2567 if not baserevs:
2556 raise error.Abort(b'no revisions selected for --base')
2568 raise error.Abort(b'no revisions selected for --base')
2557 if not targetrevs:
2569 if not targetrevs:
2558 raise error.Abort(b'no revisions selected for --target')
2570 raise error.Abort(b'no revisions selected for --target')
2559
2571
2560 # make sure the target branchmap also contains the one in the base
2572 # make sure the target branchmap also contains the one in the base
2561 targetrevs = list(set(baserevs) | set(targetrevs))
2573 targetrevs = list(set(baserevs) | set(targetrevs))
2562 targetrevs.sort()
2574 targetrevs.sort()
2563
2575
2564 cl = repo.changelog
2576 cl = repo.changelog
2565 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2577 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2566 allbaserevs.sort()
2578 allbaserevs.sort()
2567 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2579 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2568
2580
2569 newrevs = list(alltargetrevs.difference(allbaserevs))
2581 newrevs = list(alltargetrevs.difference(allbaserevs))
2570 newrevs.sort()
2582 newrevs.sort()
2571
2583
2572 allrevs = frozenset(unfi.changelog.revs())
2584 allrevs = frozenset(unfi.changelog.revs())
2573 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2585 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2574 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2586 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2575
2587
2576 def basefilter(repo, visibilityexceptions=None):
2588 def basefilter(repo, visibilityexceptions=None):
2577 return basefilterrevs
2589 return basefilterrevs
2578
2590
2579 def targetfilter(repo, visibilityexceptions=None):
2591 def targetfilter(repo, visibilityexceptions=None):
2580 return targetfilterrevs
2592 return targetfilterrevs
2581
2593
2582 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2594 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2583 ui.status(msg % (len(allbaserevs), len(newrevs)))
2595 ui.status(msg % (len(allbaserevs), len(newrevs)))
2584 if targetfilterrevs:
2596 if targetfilterrevs:
2585 msg = b'(%d revisions still filtered)\n'
2597 msg = b'(%d revisions still filtered)\n'
2586 ui.status(msg % len(targetfilterrevs))
2598 ui.status(msg % len(targetfilterrevs))
2587
2599
2588 try:
2600 try:
2589 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2601 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2590 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2602 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2591
2603
2592 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2604 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2593 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2605 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2594
2606
2595 # try to find an existing branchmap to reuse
2607 # try to find an existing branchmap to reuse
2596 subsettable = getbranchmapsubsettable()
2608 subsettable = getbranchmapsubsettable()
2597 candidatefilter = subsettable.get(None)
2609 candidatefilter = subsettable.get(None)
2598 while candidatefilter is not None:
2610 while candidatefilter is not None:
2599 candidatebm = repo.filtered(candidatefilter).branchmap()
2611 candidatebm = repo.filtered(candidatefilter).branchmap()
2600 if candidatebm.validfor(baserepo):
2612 if candidatebm.validfor(baserepo):
2601 filtered = repoview.filterrevs(repo, candidatefilter)
2613 filtered = repoview.filterrevs(repo, candidatefilter)
2602 missing = [r for r in allbaserevs if r in filtered]
2614 missing = [r for r in allbaserevs if r in filtered]
2603 base = candidatebm.copy()
2615 base = candidatebm.copy()
2604 base.update(baserepo, missing)
2616 base.update(baserepo, missing)
2605 break
2617 break
2606 candidatefilter = subsettable.get(candidatefilter)
2618 candidatefilter = subsettable.get(candidatefilter)
2607 else:
2619 else:
2608 # no suitable subset was found
2620 # no suitable subset was found
2609 base = branchmap.branchcache()
2621 base = branchmap.branchcache()
2610 base.update(baserepo, allbaserevs)
2622 base.update(baserepo, allbaserevs)
2611
2623
2612 def setup():
2624 def setup():
2613 x[0] = base.copy()
2625 x[0] = base.copy()
2614 if clearcaches:
2626 if clearcaches:
2615 unfi._revbranchcache = None
2627 unfi._revbranchcache = None
2616 clearchangelog(repo)
2628 clearchangelog(repo)
2617
2629
2618 def bench():
2630 def bench():
2619 x[0].update(targetrepo, newrevs)
2631 x[0].update(targetrepo, newrevs)
2620
2632
2621 timer(bench, setup=setup)
2633 timer(bench, setup=setup)
2622 fm.end()
2634 fm.end()
2623 finally:
2635 finally:
2624 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2636 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2625 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2637 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2626
2638
2627 @command(b'perfbranchmapload', [
2639 @command(b'perfbranchmapload', [
2628 (b'f', b'filter', b'', b'Specify repoview filter'),
2640 (b'f', b'filter', b'', b'Specify repoview filter'),
2629 (b'', b'list', False, b'List branchmap filter caches'),
2641 (b'', b'list', False, b'List branchmap filter caches'),
2630 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2642 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2631
2643
2632 ] + formatteropts)
2644 ] + formatteropts)
2633 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2645 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2634 """benchmark reading the branchmap"""
2646 """benchmark reading the branchmap"""
2635 opts = _byteskwargs(opts)
2647 opts = _byteskwargs(opts)
2636 clearrevlogs = opts[b'clear_revlogs']
2648 clearrevlogs = opts[b'clear_revlogs']
2637
2649
2638 if list:
2650 if list:
2639 for name, kind, st in repo.cachevfs.readdir(stat=True):
2651 for name, kind, st in repo.cachevfs.readdir(stat=True):
2640 if name.startswith(b'branch2'):
2652 if name.startswith(b'branch2'):
2641 filtername = name.partition(b'-')[2] or b'unfiltered'
2653 filtername = name.partition(b'-')[2] or b'unfiltered'
2642 ui.status(b'%s - %s\n'
2654 ui.status(b'%s - %s\n'
2643 % (filtername, util.bytecount(st.st_size)))
2655 % (filtername, util.bytecount(st.st_size)))
2644 return
2656 return
2645 if not filter:
2657 if not filter:
2646 filter = None
2658 filter = None
2647 subsettable = getbranchmapsubsettable()
2659 subsettable = getbranchmapsubsettable()
2648 if filter is None:
2660 if filter is None:
2649 repo = repo.unfiltered()
2661 repo = repo.unfiltered()
2650 else:
2662 else:
2651 repo = repoview.repoview(repo, filter)
2663 repo = repoview.repoview(repo, filter)
2652
2664
2653 repo.branchmap() # make sure we have a relevant, up to date branchmap
2665 repo.branchmap() # make sure we have a relevant, up to date branchmap
2654
2666
2655 try:
2667 try:
2656 fromfile = branchmap.branchcache.fromfile
2668 fromfile = branchmap.branchcache.fromfile
2657 except AttributeError:
2669 except AttributeError:
2658 # older versions
2670 # older versions
2659 fromfile = branchmap.read
2671 fromfile = branchmap.read
2660
2672
2661 currentfilter = filter
2673 currentfilter = filter
2662 # try once without the timer, as the filter may not be cached
2674 # try once without the timer, as the filter may not be cached
2663 while fromfile(repo) is None:
2675 while fromfile(repo) is None:
2664 currentfilter = subsettable.get(currentfilter)
2676 currentfilter = subsettable.get(currentfilter)
2665 if currentfilter is None:
2677 if currentfilter is None:
2666 raise error.Abort(b'No branchmap cached for %s repo'
2678 raise error.Abort(b'No branchmap cached for %s repo'
2667 % (filter or b'unfiltered'))
2679 % (filter or b'unfiltered'))
2668 repo = repo.filtered(currentfilter)
2680 repo = repo.filtered(currentfilter)
2669 timer, fm = gettimer(ui, opts)
2681 timer, fm = gettimer(ui, opts)
2670 def setup():
2682 def setup():
2671 if clearrevlogs:
2683 if clearrevlogs:
2672 clearchangelog(repo)
2684 clearchangelog(repo)
2673 def bench():
2685 def bench():
2674 fromfile(repo)
2686 fromfile(repo)
2675 timer(bench, setup=setup)
2687 timer(bench, setup=setup)
2676 fm.end()
2688 fm.end()
2677
2689
2678 @command(b'perfloadmarkers')
2690 @command(b'perfloadmarkers')
2679 def perfloadmarkers(ui, repo):
2691 def perfloadmarkers(ui, repo):
2680 """benchmark the time to parse the on-disk markers for a repo
2692 """benchmark the time to parse the on-disk markers for a repo
2681
2693
2682 Result is the number of markers in the repo."""
2694 Result is the number of markers in the repo."""
2683 timer, fm = gettimer(ui)
2695 timer, fm = gettimer(ui)
2684 svfs = getsvfs(repo)
2696 svfs = getsvfs(repo)
2685 timer(lambda: len(obsolete.obsstore(svfs)))
2697 timer(lambda: len(obsolete.obsstore(svfs)))
2686 fm.end()
2698 fm.end()
2687
2699
2688 @command(b'perflrucachedict', formatteropts +
2700 @command(b'perflrucachedict', formatteropts +
2689 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2701 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2690 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2702 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2691 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2703 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2692 (b'', b'size', 4, b'size of cache'),
2704 (b'', b'size', 4, b'size of cache'),
2693 (b'', b'gets', 10000, b'number of key lookups'),
2705 (b'', b'gets', 10000, b'number of key lookups'),
2694 (b'', b'sets', 10000, b'number of key sets'),
2706 (b'', b'sets', 10000, b'number of key sets'),
2695 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2707 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2696 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2708 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2697 norepo=True)
2709 norepo=True)
2698 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2710 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2699 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2711 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
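# Illustrative invocation (a sketch only; the sizes and costs are arbitrary
# examples):
#   $ hg perflrucachedict --size 100 --gets 50000 --costlimit 500 --mixed 20000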
2700 opts = _byteskwargs(opts)
2712 opts = _byteskwargs(opts)
2701
2713
2702 def doinit():
2714 def doinit():
2703 for i in _xrange(10000):
2715 for i in _xrange(10000):
2704 util.lrucachedict(size)
2716 util.lrucachedict(size)
2705
2717
2706 costrange = list(range(mincost, maxcost + 1))
2718 costrange = list(range(mincost, maxcost + 1))
2707
2719
2708 values = []
2720 values = []
2709 for i in _xrange(size):
2721 for i in _xrange(size):
2710 values.append(random.randint(0, _maxint))
2722 values.append(random.randint(0, _maxint))
2711
2723
2712 # Get mode fills the cache and tests raw lookup performance with no
2724 # Get mode fills the cache and tests raw lookup performance with no
2713 # eviction.
2725 # eviction.
2714 getseq = []
2726 getseq = []
2715 for i in _xrange(gets):
2727 for i in _xrange(gets):
2716 getseq.append(random.choice(values))
2728 getseq.append(random.choice(values))
2717
2729
2718 def dogets():
2730 def dogets():
2719 d = util.lrucachedict(size)
2731 d = util.lrucachedict(size)
2720 for v in values:
2732 for v in values:
2721 d[v] = v
2733 d[v] = v
2722 for key in getseq:
2734 for key in getseq:
2723 value = d[key]
2735 value = d[key]
2724 value # silence pyflakes warning
2736 value # silence pyflakes warning
2725
2737
2726 def dogetscost():
2738 def dogetscost():
2727 d = util.lrucachedict(size, maxcost=costlimit)
2739 d = util.lrucachedict(size, maxcost=costlimit)
2728 for i, v in enumerate(values):
2740 for i, v in enumerate(values):
2729 d.insert(v, v, cost=costs[i])
2741 d.insert(v, v, cost=costs[i])
2730 for key in getseq:
2742 for key in getseq:
2731 try:
2743 try:
2732 value = d[key]
2744 value = d[key]
2733 value # silence pyflakes warning
2745 value # silence pyflakes warning
2734 except KeyError:
2746 except KeyError:
2735 pass
2747 pass
2736
2748
2737 # Set mode tests insertion speed with cache eviction.
2749 # Set mode tests insertion speed with cache eviction.
2738 setseq = []
2750 setseq = []
2739 costs = []
2751 costs = []
2740 for i in _xrange(sets):
2752 for i in _xrange(sets):
2741 setseq.append(random.randint(0, _maxint))
2753 setseq.append(random.randint(0, _maxint))
2742 costs.append(random.choice(costrange))
2754 costs.append(random.choice(costrange))
2743
2755
2744 def doinserts():
2756 def doinserts():
2745 d = util.lrucachedict(size)
2757 d = util.lrucachedict(size)
2746 for v in setseq:
2758 for v in setseq:
2747 d.insert(v, v)
2759 d.insert(v, v)
2748
2760
2749 def doinsertscost():
2761 def doinsertscost():
2750 d = util.lrucachedict(size, maxcost=costlimit)
2762 d = util.lrucachedict(size, maxcost=costlimit)
2751 for i, v in enumerate(setseq):
2763 for i, v in enumerate(setseq):
2752 d.insert(v, v, cost=costs[i])
2764 d.insert(v, v, cost=costs[i])
2753
2765
2754 def dosets():
2766 def dosets():
2755 d = util.lrucachedict(size)
2767 d = util.lrucachedict(size)
2756 for v in setseq:
2768 for v in setseq:
2757 d[v] = v
2769 d[v] = v
2758
2770
2759 # Mixed mode randomly performs gets and sets with eviction.
2771 # Mixed mode randomly performs gets and sets with eviction.
2760 mixedops = []
2772 mixedops = []
2761 for i in _xrange(mixed):
2773 for i in _xrange(mixed):
2762 r = random.randint(0, 100)
2774 r = random.randint(0, 100)
2763 if r < mixedgetfreq:
2775 if r < mixedgetfreq:
2764 op = 0
2776 op = 0
2765 else:
2777 else:
2766 op = 1
2778 op = 1
2767
2779
2768 mixedops.append((op,
2780 mixedops.append((op,
2769 random.randint(0, size * 2),
2781 random.randint(0, size * 2),
2770 random.choice(costrange)))
2782 random.choice(costrange)))
2771
2783
2772 def domixed():
2784 def domixed():
2773 d = util.lrucachedict(size)
2785 d = util.lrucachedict(size)
2774
2786
2775 for op, v, cost in mixedops:
2787 for op, v, cost in mixedops:
2776 if op == 0:
2788 if op == 0:
2777 try:
2789 try:
2778 d[v]
2790 d[v]
2779 except KeyError:
2791 except KeyError:
2780 pass
2792 pass
2781 else:
2793 else:
2782 d[v] = v
2794 d[v] = v
2783
2795
2784 def domixedcost():
2796 def domixedcost():
2785 d = util.lrucachedict(size, maxcost=costlimit)
2797 d = util.lrucachedict(size, maxcost=costlimit)
2786
2798
2787 for op, v, cost in mixedops:
2799 for op, v, cost in mixedops:
2788 if op == 0:
2800 if op == 0:
2789 try:
2801 try:
2790 d[v]
2802 d[v]
2791 except KeyError:
2803 except KeyError:
2792 pass
2804 pass
2793 else:
2805 else:
2794 d.insert(v, v, cost=cost)
2806 d.insert(v, v, cost=cost)
2795
2807
2796 benches = [
2808 benches = [
2797 (doinit, b'init'),
2809 (doinit, b'init'),
2798 ]
2810 ]
2799
2811
2800 if costlimit:
2812 if costlimit:
2801 benches.extend([
2813 benches.extend([
2802 (dogetscost, b'gets w/ cost limit'),
2814 (dogetscost, b'gets w/ cost limit'),
2803 (doinsertscost, b'inserts w/ cost limit'),
2815 (doinsertscost, b'inserts w/ cost limit'),
2804 (domixedcost, b'mixed w/ cost limit'),
2816 (domixedcost, b'mixed w/ cost limit'),
2805 ])
2817 ])
2806 else:
2818 else:
2807 benches.extend([
2819 benches.extend([
2808 (dogets, b'gets'),
2820 (dogets, b'gets'),
2809 (doinserts, b'inserts'),
2821 (doinserts, b'inserts'),
2810 (dosets, b'sets'),
2822 (dosets, b'sets'),
2811 (domixed, b'mixed')
2823 (domixed, b'mixed')
2812 ])
2824 ])
2813
2825
2814 for fn, title in benches:
2826 for fn, title in benches:
2815 timer, fm = gettimer(ui, opts)
2827 timer, fm = gettimer(ui, opts)
2816 timer(fn, title=title)
2828 timer(fn, title=title)
2817 fm.end()
2829 fm.end()
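
# Illustrative sketch, not part of the extension: the benchmark bodies above
# exercise three operations of util.lrucachedict, namely
#
#     d = util.lrucachedict(4)              # cache holding at most 4 entries
#     d[key] = value                        # plain insertion (dosets)
#     d.insert(key, value, cost=10)         # costed insertion (doinsertscost)
#     d[key]                                # lookup; raises KeyError on a miss (domixed)
#
# with eviction kicking in once the size limit (or, with maxcost, the total
# cost limit) is exceeded.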
2818
2830
2819 @command(b'perfwrite', formatteropts)
2831 @command(b'perfwrite', formatteropts)
2820 def perfwrite(ui, repo, **opts):
2832 def perfwrite(ui, repo, **opts):
2821 """microbenchmark ui.write
2833 """microbenchmark ui.write
2822 """
2834 """
2823 opts = _byteskwargs(opts)
2835 opts = _byteskwargs(opts)
2824
2836
2825 timer, fm = gettimer(ui, opts)
2837 timer, fm = gettimer(ui, opts)
2826 def write():
2838 def write():
2827 for i in range(100000):
2839 for i in range(100000):
2828 ui.write((b'Testing write performance\n'))
2840 ui.write((b'Testing write performance\n'))
2829 timer(write)
2841 timer(write)
2830 fm.end()
2842 fm.end()
2831
2843
2832 def uisetup(ui):
2844 def uisetup(ui):
2833 if (util.safehasattr(cmdutil, b'openrevlog') and
2845 if (util.safehasattr(cmdutil, b'openrevlog') and
2834 not util.safehasattr(commands, b'debugrevlogopts')):
2846 not util.safehasattr(commands, b'debugrevlogopts')):
2835 # for "historical portability":
2847 # for "historical portability":
2836 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2848 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2837 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2849 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2838 # openrevlog() should cause failure, because it has been
2850 # openrevlog() should cause failure, because it has been
2839 # available since 3.5 (or 49c583ca48c4).
2851 # available since 3.5 (or 49c583ca48c4).
2840 def openrevlog(orig, repo, cmd, file_, opts):
2852 def openrevlog(orig, repo, cmd, file_, opts):
2841 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2853 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2842 raise error.Abort(b"This version doesn't support --dir option",
2854 raise error.Abort(b"This version doesn't support --dir option",
2843 hint=b"use 3.5 or later")
2855 hint=b"use 3.5 or later")
2844 return orig(repo, cmd, file_, opts)
2856 return orig(repo, cmd, file_, opts)
2845 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2857 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2846
2858
2847 @command(b'perfprogress', formatteropts + [
2859 @command(b'perfprogress', formatteropts + [
2848 (b'', b'topic', b'topic', b'topic for progress messages'),
2860 (b'', b'topic', b'topic', b'topic for progress messages'),
2849 (b'c', b'total', 1000000, b'total value we are progressing to'),
2861 (b'c', b'total', 1000000, b'total value we are progressing to'),
2850 ], norepo=True)
2862 ], norepo=True)
2851 def perfprogress(ui, topic=None, total=None, **opts):
2863 def perfprogress(ui, topic=None, total=None, **opts):
2852 """printing of progress bars"""
2864 """printing of progress bars"""
2853 opts = _byteskwargs(opts)
2865 opts = _byteskwargs(opts)
2854
2866
2855 timer, fm = gettimer(ui, opts)
2867 timer, fm = gettimer(ui, opts)
2856
2868
2857 def doprogress():
2869 def doprogress():
2858 with ui.makeprogress(topic, total=total) as progress:
2870 with ui.makeprogress(topic, total=total) as progress:
2859 for i in pycompat.xrange(total):
2871 for i in pycompat.xrange(total):
2860 progress.increment()
2872 progress.increment()
2861
2873
2862 timer(doprogress)
2874 timer(doprogress)
2863 fm.end()
2875 fm.end()
@@ -1,356 +1,378
1 #require test-repo
1 #require test-repo
2
2
3 Set vars:
3 Set vars:
4
4
5 $ . "$TESTDIR/helpers-testrepo.sh"
5 $ . "$TESTDIR/helpers-testrepo.sh"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
7
7
8 Prepare repo:
8 Prepare repo:
9
9
10 $ hg init
10 $ hg init
11
11
12 $ echo this is file a > a
12 $ echo this is file a > a
13 $ hg add a
13 $ hg add a
14 $ hg commit -m first
14 $ hg commit -m first
15
15
16 $ echo adding to file a >> a
16 $ echo adding to file a >> a
17 $ hg commit -m second
17 $ hg commit -m second
18
18
19 $ echo adding more to file a >> a
19 $ echo adding more to file a >> a
20 $ hg commit -m third
20 $ hg commit -m third
21
21
22 $ hg up -r 0
22 $ hg up -r 0
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 $ echo merge-this >> a
24 $ echo merge-this >> a
25 $ hg commit -m merge-able
25 $ hg commit -m merge-able
26 created new head
26 created new head
27
27
28 $ hg up -r 2
28 $ hg up -r 2
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30
30
31 perfstatus
31 perfstatus
32
32
33 $ cat >> $HGRCPATH << EOF
33 $ cat >> $HGRCPATH << EOF
34 > [extensions]
34 > [extensions]
35 > perf=$CONTRIBDIR/perf.py
35 > perf=$CONTRIBDIR/perf.py
36 > [perf]
36 > [perf]
37 > presleep=0
37 > presleep=0
38 > stub=on
38 > stub=on
39 > parentscount=1
39 > parentscount=1
40 > EOF
40 > EOF
41 $ hg help -e perf
41 $ hg help -e perf
42 perf extension - helper extension to measure performance
42 perf extension - helper extension to measure performance
43
43
44 Configurations
44 Configurations
45 ==============
45 ==============
46
46
47 "perf"
47 "perf"
48 ------
48 ------
49
49
50 "all-timing"
50 "all-timing"
51 When set, additional statistics will be reported for each benchmark: best,
51 When set, additional statistics will be reported for each benchmark: best,
52 worst, median average. If not set only the best timing is reported
52 worst, median average. If not set only the best timing is reported
53 (default: off).
53 (default: off).
54
54
55 "presleep"
55 "presleep"
56 number of second to wait before any group of runs (default: 1)
56 number of second to wait before any group of runs (default: 1)
57
57
58 "pre-run"
59 number of run to perform before starting measurement.
60
58 "run-limits"
61 "run-limits"
59 Control the number of runs each benchmark will perform. The option value
62 Control the number of runs each benchmark will perform. The option value
60 should be a list of '<time>-<numberofrun>' pairs. After each run the
63 should be a list of '<time>-<numberofrun>' pairs. After each run the
61 conditions are considered in order with the following logic:
64 conditions are considered in order with the following logic:
62
65
63 If benchmark has been running for <time> seconds, and we have performed
66 If benchmark has been running for <time> seconds, and we have performed
64 <numberofrun> iterations, stop the benchmark,
67 <numberofrun> iterations, stop the benchmark,
65
68
66 The default value is: '3.0-100, 10.0-3'
69 The default value is: '3.0-100, 10.0-3'
67
70
68 "stub"
71 "stub"
69 When set, benchmarks will only be run once, useful for testing (default:
72 When set, benchmarks will only be run once, useful for testing (default:
70 off)
73 off)
71
74
72 list of commands:
75 list of commands:
73
76
74 perfaddremove
77 perfaddremove
75 (no help text available)
78 (no help text available)
76 perfancestors
79 perfancestors
77 (no help text available)
80 (no help text available)
78 perfancestorset
81 perfancestorset
79 (no help text available)
82 (no help text available)
80 perfannotate (no help text available)
83 perfannotate (no help text available)
81 perfbdiff benchmark a bdiff between revisions
84 perfbdiff benchmark a bdiff between revisions
82 perfbookmarks
85 perfbookmarks
83 benchmark parsing bookmarks from disk to memory
86 benchmark parsing bookmarks from disk to memory
84 perfbranchmap
87 perfbranchmap
85 benchmark the update of a branchmap
88 benchmark the update of a branchmap
86 perfbranchmapload
89 perfbranchmapload
87 benchmark reading the branchmap
90 benchmark reading the branchmap
88 perfbranchmapupdate
91 perfbranchmapupdate
89 benchmark branchmap update from for <base> revs to <target>
92 benchmark branchmap update from for <base> revs to <target>
90 revs
93 revs
91 perfbundleread
94 perfbundleread
92 Benchmark reading of bundle files.
95 Benchmark reading of bundle files.
93 perfcca (no help text available)
96 perfcca (no help text available)
94 perfchangegroupchangelog
97 perfchangegroupchangelog
95 Benchmark producing a changelog group for a changegroup.
98 Benchmark producing a changelog group for a changegroup.
96 perfchangeset
99 perfchangeset
97 (no help text available)
100 (no help text available)
98 perfctxfiles (no help text available)
101 perfctxfiles (no help text available)
99 perfdiffwd Profile diff of working directory changes
102 perfdiffwd Profile diff of working directory changes
100 perfdirfoldmap
103 perfdirfoldmap
101 (no help text available)
104 (no help text available)
102 perfdirs (no help text available)
105 perfdirs (no help text available)
103 perfdirstate (no help text available)
106 perfdirstate (no help text available)
104 perfdirstatedirs
107 perfdirstatedirs
105 (no help text available)
108 (no help text available)
106 perfdirstatefoldmap
109 perfdirstatefoldmap
107 (no help text available)
110 (no help text available)
108 perfdirstatewrite
111 perfdirstatewrite
109 (no help text available)
112 (no help text available)
110 perfdiscovery
113 perfdiscovery
111 benchmark discovery between local repo and the peer at given
114 benchmark discovery between local repo and the peer at given
112 path
115 path
113 perffncacheencode
116 perffncacheencode
114 (no help text available)
117 (no help text available)
115 perffncacheload
118 perffncacheload
116 (no help text available)
119 (no help text available)
117 perffncachewrite
120 perffncachewrite
118 (no help text available)
121 (no help text available)
119 perfheads benchmark the computation of a changelog heads
122 perfheads benchmark the computation of a changelog heads
120 perfhelper-pathcopies
123 perfhelper-pathcopies
121 find statistic about potential parameters for the
124 find statistic about potential parameters for the
122 'perftracecopies'
125 'perftracecopies'
123 perfignore benchmark operation related to computing ignore
126 perfignore benchmark operation related to computing ignore
124 perfindex benchmark index creation time followed by a lookup
127 perfindex benchmark index creation time followed by a lookup
125 perflinelogedits
128 perflinelogedits
126 (no help text available)
129 (no help text available)
127 perfloadmarkers
130 perfloadmarkers
128 benchmark the time to parse the on-disk markers for a repo
131 benchmark the time to parse the on-disk markers for a repo
129 perflog (no help text available)
132 perflog (no help text available)
130 perflookup (no help text available)
133 perflookup (no help text available)
131 perflrucachedict
134 perflrucachedict
132 (no help text available)
135 (no help text available)
133 perfmanifest benchmark the time to read a manifest from disk and return a
136 perfmanifest benchmark the time to read a manifest from disk and return a
134 usable
137 usable
135 perfmergecalculate
138 perfmergecalculate
136 (no help text available)
139 (no help text available)
137 perfmoonwalk benchmark walking the changelog backwards
140 perfmoonwalk benchmark walking the changelog backwards
138 perfnodelookup
141 perfnodelookup
139 (no help text available)
142 (no help text available)
140 perfnodemap benchmark the time necessary to look up revision from a cold
143 perfnodemap benchmark the time necessary to look up revision from a cold
141 nodemap
144 nodemap
142 perfparents benchmark the time necessary to fetch one changeset's parents.
145 perfparents benchmark the time necessary to fetch one changeset's parents.
143 perfpathcopies
146 perfpathcopies
144 benchmark the copy tracing logic
147 benchmark the copy tracing logic
145 perfphases benchmark phasesets computation
148 perfphases benchmark phasesets computation
146 perfphasesremote
149 perfphasesremote
147 benchmark time needed to analyse phases of the remote server
150 benchmark time needed to analyse phases of the remote server
148 perfprogress printing of progress bars
151 perfprogress printing of progress bars
149 perfrawfiles (no help text available)
152 perfrawfiles (no help text available)
150 perfrevlogchunks
153 perfrevlogchunks
151 Benchmark operations on revlog chunks.
154 Benchmark operations on revlog chunks.
152 perfrevlogindex
155 perfrevlogindex
153 Benchmark operations against a revlog index.
156 Benchmark operations against a revlog index.
154 perfrevlogrevision
157 perfrevlogrevision
155 Benchmark obtaining a revlog revision.
158 Benchmark obtaining a revlog revision.
156 perfrevlogrevisions
159 perfrevlogrevisions
157 Benchmark reading a series of revisions from a revlog.
160 Benchmark reading a series of revisions from a revlog.
158 perfrevlogwrite
161 perfrevlogwrite
159 Benchmark writing a series of revisions to a revlog.
162 Benchmark writing a series of revisions to a revlog.
160 perfrevrange (no help text available)
163 perfrevrange (no help text available)
161 perfrevset benchmark the execution time of a revset
164 perfrevset benchmark the execution time of a revset
162 perfstartup (no help text available)
165 perfstartup (no help text available)
163 perfstatus (no help text available)
166 perfstatus (no help text available)
164 perftags (no help text available)
167 perftags (no help text available)
165 perftemplating
168 perftemplating
166 test the rendering time of a given template
169 test the rendering time of a given template
167 perfunidiff benchmark a unified diff between revisions
170 perfunidiff benchmark a unified diff between revisions
168 perfvolatilesets
171 perfvolatilesets
169 benchmark the computation of various volatile set
172 benchmark the computation of various volatile set
170 perfwalk (no help text available)
173 perfwalk (no help text available)
171 perfwrite microbenchmark ui.write
174 perfwrite microbenchmark ui.write
172
175
173 (use 'hg help -v perf' to show built-in aliases and global options)
176 (use 'hg help -v perf' to show built-in aliases and global options)
174 $ hg perfaddremove
177 $ hg perfaddremove
175 $ hg perfancestors
178 $ hg perfancestors
176 $ hg perfancestorset 2
179 $ hg perfancestorset 2
177 $ hg perfannotate a
180 $ hg perfannotate a
178 $ hg perfbdiff -c 1
181 $ hg perfbdiff -c 1
179 $ hg perfbdiff --alldata 1
182 $ hg perfbdiff --alldata 1
180 $ hg perfunidiff -c 1
183 $ hg perfunidiff -c 1
181 $ hg perfunidiff --alldata 1
184 $ hg perfunidiff --alldata 1
182 $ hg perfbookmarks
185 $ hg perfbookmarks
183 $ hg perfbranchmap
186 $ hg perfbranchmap
184 $ hg perfbranchmapload
187 $ hg perfbranchmapload
185 $ hg perfbranchmapupdate --base "not tip" --target "tip"
188 $ hg perfbranchmapupdate --base "not tip" --target "tip"
186 benchmark of branchmap with 3 revisions with 1 new ones
189 benchmark of branchmap with 3 revisions with 1 new ones
187 $ hg perfcca
190 $ hg perfcca
188 $ hg perfchangegroupchangelog
191 $ hg perfchangegroupchangelog
189 $ hg perfchangegroupchangelog --cgversion 01
192 $ hg perfchangegroupchangelog --cgversion 01
190 $ hg perfchangeset 2
193 $ hg perfchangeset 2
191 $ hg perfctxfiles 2
194 $ hg perfctxfiles 2
192 $ hg perfdiffwd
195 $ hg perfdiffwd
193 $ hg perfdirfoldmap
196 $ hg perfdirfoldmap
194 $ hg perfdirs
197 $ hg perfdirs
195 $ hg perfdirstate
198 $ hg perfdirstate
196 $ hg perfdirstatedirs
199 $ hg perfdirstatedirs
197 $ hg perfdirstatefoldmap
200 $ hg perfdirstatefoldmap
198 $ hg perfdirstatewrite
201 $ hg perfdirstatewrite
199 #if repofncache
202 #if repofncache
200 $ hg perffncacheencode
203 $ hg perffncacheencode
201 $ hg perffncacheload
204 $ hg perffncacheload
202 $ hg debugrebuildfncache
205 $ hg debugrebuildfncache
203 fncache already up to date
206 fncache already up to date
204 $ hg perffncachewrite
207 $ hg perffncachewrite
205 $ hg debugrebuildfncache
208 $ hg debugrebuildfncache
206 fncache already up to date
209 fncache already up to date
207 #endif
210 #endif
208 $ hg perfheads
211 $ hg perfheads
209 $ hg perfignore
212 $ hg perfignore
210 $ hg perfindex
213 $ hg perfindex
211 $ hg perflinelogedits -n 1
214 $ hg perflinelogedits -n 1
212 $ hg perfloadmarkers
215 $ hg perfloadmarkers
213 $ hg perflog
216 $ hg perflog
214 $ hg perflookup 2
217 $ hg perflookup 2
215 $ hg perflrucache
218 $ hg perflrucache
216 $ hg perfmanifest 2
219 $ hg perfmanifest 2
217 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
220 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
218 $ hg perfmanifest -m 44fe2c8352bb
221 $ hg perfmanifest -m 44fe2c8352bb
219 abort: manifest revision must be integer or full node
222 abort: manifest revision must be integer or full node
220 [255]
223 [255]
221 $ hg perfmergecalculate -r 3
224 $ hg perfmergecalculate -r 3
222 $ hg perfmoonwalk
225 $ hg perfmoonwalk
223 $ hg perfnodelookup 2
226 $ hg perfnodelookup 2
224 $ hg perfpathcopies 1 2
227 $ hg perfpathcopies 1 2
225 $ hg perfprogress --total 1000
228 $ hg perfprogress --total 1000
226 $ hg perfrawfiles 2
229 $ hg perfrawfiles 2
227 $ hg perfrevlogindex -c
230 $ hg perfrevlogindex -c
228 #if reporevlogstore
231 #if reporevlogstore
229 $ hg perfrevlogrevisions .hg/store/data/a.i
232 $ hg perfrevlogrevisions .hg/store/data/a.i
230 #endif
233 #endif
231 $ hg perfrevlogrevision -m 0
234 $ hg perfrevlogrevision -m 0
232 $ hg perfrevlogchunks -c
235 $ hg perfrevlogchunks -c
233 $ hg perfrevrange
236 $ hg perfrevrange
234 $ hg perfrevset 'all()'
237 $ hg perfrevset 'all()'
235 $ hg perfstartup
238 $ hg perfstartup
236 $ hg perfstatus
239 $ hg perfstatus
237 $ hg perftags
240 $ hg perftags
238 $ hg perftemplating
241 $ hg perftemplating
239 $ hg perfvolatilesets
242 $ hg perfvolatilesets
240 $ hg perfwalk
243 $ hg perfwalk
241 $ hg perfparents
244 $ hg perfparents
242 $ hg perfdiscovery -q .
245 $ hg perfdiscovery -q .
243
246
244 Test run control
247 Test run control
245 ----------------
248 ----------------
246
249
247 Simple single entry
250 Simple single entry
248
251
249 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
252 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
250 ! wall * comb * user * sys * (best of 15) (glob)
253 ! wall * comb * user * sys * (best of 15) (glob)
251
254
252 Multiple entries
255 Multiple entries
253
256
254 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
257 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
255 ! wall * comb * user * sys * (best of 5) (glob)
258 ! wall * comb * user * sys * (best of 5) (glob)
256
259
257 error cases are ignored
260 error cases are ignored
258
261
259 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
262 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
260 malformatted run limit entry, missing "-": 500
263 malformatted run limit entry, missing "-": 500
261 ! wall * comb * user * sys * (best of 5) (glob)
264 ! wall * comb * user * sys * (best of 5) (glob)
262 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
265 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
263 malformatted run limit entry, could not convert string to float: aaa: aaa-12 (no-py3 !)
266 malformatted run limit entry, could not convert string to float: aaa: aaa-12 (no-py3 !)
264 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
267 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
265 ! wall * comb * user * sys * (best of 5) (glob)
268 ! wall * comb * user * sys * (best of 5) (glob)
266 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
269 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
267 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
270 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
268 ! wall * comb * user * sys * (best of 5) (glob)
271 ! wall * comb * user * sys * (best of 5) (glob)
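
The three rejected entries above correspond to the failure modes of parsing
'<time>-<numberofrun>' pairs. A rough sketch of that parsing logic, for
illustration only (the function name, signature and simplified warnings below
are made up; the extension's real warning also embeds the underlying exception
text, as the transcript shows):

    def parserunlimits(ui, spec):
        # spec looks like b'3.0-100, 10.0-3'; returns [(seconds, runs), ...]
        limits = []
        for item in spec.split(b','):
            item = item.strip()
            parts = item.split(b'-', 1)
            if len(parts) < 2:
                ui.warn(b'malformatted run limit entry, missing "-": %s\n' % item)
                continue
            try:
                seconds = float(parts[0])  # b'aaa' fails: could not convert string to float
                runs = int(parts[1])       # b'aaaaaa' fails: invalid literal for int()
            except ValueError:
                ui.warn(b'malformatted run limit entry: %s\n' % item)
                continue
            limits.append((seconds, runs))
        return limits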
269
272
270 test actual output
273 test actual output
271 ------------------
274 ------------------
272
275
273 normal output:
276 normal output:
274
277
275 $ hg perfheads --config perf.stub=no
278 $ hg perfheads --config perf.stub=no
276 ! wall * comb * user * sys * (best of *) (glob)
279 ! wall * comb * user * sys * (best of *) (glob)
277
280
278 detailed output:
281 detailed output:
279
282
280 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
283 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
281 ! wall * comb * user * sys * (best of *) (glob)
284 ! wall * comb * user * sys * (best of *) (glob)
282 ! wall * comb * user * sys * (max of *) (glob)
285 ! wall * comb * user * sys * (max of *) (glob)
283 ! wall * comb * user * sys * (avg of *) (glob)
286 ! wall * comb * user * sys * (avg of *) (glob)
284 ! wall * comb * user * sys * (median of *) (glob)
287 ! wall * comb * user * sys * (median of *) (glob)
285
288
286 test json output
289 test json output
287 ----------------
290 ----------------
288
291
289 normal output:
292 normal output:
290
293
291 $ hg perfheads --template json --config perf.stub=no
294 $ hg perfheads --template json --config perf.stub=no
292 [
295 [
293 {
296 {
294 "comb": *, (glob)
297 "comb": *, (glob)
295 "count": *, (glob)
298 "count": *, (glob)
296 "sys": *, (glob)
299 "sys": *, (glob)
297 "user": *, (glob)
300 "user": *, (glob)
298 "wall": * (glob)
301 "wall": * (glob)
299 }
302 }
300 ]
303 ]
301
304
302 detailed output:
305 detailed output:
303
306
304 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
307 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
305 [
308 [
306 {
309 {
307 "avg.comb": *, (glob)
310 "avg.comb": *, (glob)
308 "avg.count": *, (glob)
311 "avg.count": *, (glob)
309 "avg.sys": *, (glob)
312 "avg.sys": *, (glob)
310 "avg.user": *, (glob)
313 "avg.user": *, (glob)
311 "avg.wall": *, (glob)
314 "avg.wall": *, (glob)
312 "comb": *, (glob)
315 "comb": *, (glob)
313 "count": *, (glob)
316 "count": *, (glob)
314 "max.comb": *, (glob)
317 "max.comb": *, (glob)
315 "max.count": *, (glob)
318 "max.count": *, (glob)
316 "max.sys": *, (glob)
319 "max.sys": *, (glob)
317 "max.user": *, (glob)
320 "max.user": *, (glob)
318 "max.wall": *, (glob)
321 "max.wall": *, (glob)
319 "median.comb": *, (glob)
322 "median.comb": *, (glob)
320 "median.count": *, (glob)
323 "median.count": *, (glob)
321 "median.sys": *, (glob)
324 "median.sys": *, (glob)
322 "median.user": *, (glob)
325 "median.user": *, (glob)
323 "median.wall": *, (glob)
326 "median.wall": *, (glob)
324 "sys": *, (glob)
327 "sys": *, (glob)
325 "user": *, (glob)
328 "user": *, (glob)
326 "wall": * (glob)
329 "wall": * (glob)
327 }
330 }
328 ]
331 ]
329
332
333 Test pre-run feature
334 --------------------
335
336 (perf discovery has some spurious output)
337
338 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
339 ! wall * comb * user * sys * (best of 1) (glob)
340 searching for changes
341 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
342 ! wall * comb * user * sys * (best of 1) (glob)
343 searching for changes
344 searching for changes
345 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
346 ! wall * comb * user * sys * (best of 1) (glob)
347 searching for changes
348 searching for changes
349 searching for changes
350 searching for changes
351
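
The warm-up loop that produces the extra runs lives in the timing helper
earlier in perf.py and is not part of this hunk. As a hedged sketch only (the
names 'prerun' and 'func' are illustrative, and the default value is assumed
here), the behaviour demonstrated above amounts to:

    prerun = ui.configint(b'perf', b'pre-run', default=0)  # illustrative default
    for i in range(prerun):
        func()  # execute the benchmarked body, discarding the result
    # timing samples are collected only for the runs that follow

Each warm-up executes the full benchmark body, which is why perf.pre-run=3
yields four "searching for changes" lines: three unmeasured warm-ups plus the
single measured run.
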
330 Check perf.py for historical portability
352 Check perf.py for historical portability
331 ----------------------------------------
353 ----------------------------------------
332
354
333 $ cd "$TESTDIR/.."
355 $ cd "$TESTDIR/.."
334
356
335 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
357 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
336 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
358 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
337 > "$TESTDIR"/check-perf-code.py contrib/perf.py
359 > "$TESTDIR"/check-perf-code.py contrib/perf.py
338 contrib/perf.py:\d+: (re)
360 contrib/perf.py:\d+: (re)
339 > from mercurial import (
361 > from mercurial import (
340 import newer module separately in try clause for early Mercurial
362 import newer module separately in try clause for early Mercurial
341 contrib/perf.py:\d+: (re)
363 contrib/perf.py:\d+: (re)
342 > from mercurial import (
364 > from mercurial import (
343 import newer module separately in try clause for early Mercurial
365 import newer module separately in try clause for early Mercurial
344 contrib/perf.py:\d+: (re)
366 contrib/perf.py:\d+: (re)
345 > origindexpath = orig.opener.join(orig.indexfile)
367 > origindexpath = orig.opener.join(orig.indexfile)
346 use getvfs()/getsvfs() for early Mercurial
368 use getvfs()/getsvfs() for early Mercurial
347 contrib/perf.py:\d+: (re)
369 contrib/perf.py:\d+: (re)
348 > origdatapath = orig.opener.join(orig.datafile)
370 > origdatapath = orig.opener.join(orig.datafile)
349 use getvfs()/getsvfs() for early Mercurial
371 use getvfs()/getsvfs() for early Mercurial
350 contrib/perf.py:\d+: (re)
372 contrib/perf.py:\d+: (re)
351 > vfs = vfsmod.vfs(tmpdir)
373 > vfs = vfsmod.vfs(tmpdir)
352 use getvfs()/getsvfs() for early Mercurial
374 use getvfs()/getsvfs() for early Mercurial
353 contrib/perf.py:\d+: (re)
375 contrib/perf.py:\d+: (re)
354 > vfs.options = getattr(orig.opener, 'options', None)
376 > vfs.options = getattr(orig.opener, 'options', None)
355 use getvfs()/getsvfs() for early Mercurial
377 use getvfs()/getsvfs() for early Mercurial
356 [1]
378 [1]