##// END OF EJS Templates
py3: use range() instead of xrange()...
Pulkit Goyal -
r42562:c2d10506 default
parent child Browse files
Show More
@@ -1,2904 +1,2904 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of second to wait before any group of runs (default: 1)
16 number of second to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of run to perform before starting measurement.
19 number of run to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 from __future__ import absolute_import
57 from __future__ import absolute_import
58 import contextlib
58 import contextlib
59 import functools
59 import functools
60 import gc
60 import gc
61 import os
61 import os
62 import random
62 import random
63 import shutil
63 import shutil
64 import struct
64 import struct
65 import sys
65 import sys
66 import tempfile
66 import tempfile
67 import threading
67 import threading
68 import time
68 import time
69 from mercurial import (
69 from mercurial import (
70 changegroup,
70 changegroup,
71 cmdutil,
71 cmdutil,
72 commands,
72 commands,
73 copies,
73 copies,
74 error,
74 error,
75 extensions,
75 extensions,
76 hg,
76 hg,
77 mdiff,
77 mdiff,
78 merge,
78 merge,
79 revlog,
79 revlog,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 dir(registrar) # forcibly load it
96 dir(registrar) # forcibly load it
97 except ImportError:
97 except ImportError:
98 registrar = None
98 registrar = None
99 try:
99 try:
100 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
100 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 except ImportError:
101 except ImportError:
102 pass
102 pass
103 try:
103 try:
104 from mercurial.utils import repoviewutil # since 5.0
104 from mercurial.utils import repoviewutil # since 5.0
105 except ImportError:
105 except ImportError:
106 repoviewutil = None
106 repoviewutil = None
107 try:
107 try:
108 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
108 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 except ImportError:
109 except ImportError:
110 pass
110 pass
111 try:
111 try:
112 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
112 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 except ImportError:
113 except ImportError:
114 pass
114 pass
115
115
116 try:
116 try:
117 from mercurial import profiling
117 from mercurial import profiling
118 except ImportError:
118 except ImportError:
119 profiling = None
119 profiling = None
120
120
def identity(a):
    """Return `a` unchanged.

    Used as a no-op stand-in for pycompat helpers (byteskwargs, fsencode,
    ...) when running against Mercurial versions that predate them.
    """
    return a
123
123
124 try:
124 try:
125 from mercurial import pycompat
125 from mercurial import pycompat
126 getargspec = pycompat.getargspec # added to module after 4.5
126 getargspec = pycompat.getargspec # added to module after 4.5
127 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
127 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
128 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
128 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
129 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
129 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
130 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
130 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
131 if pycompat.ispy3:
131 if pycompat.ispy3:
132 _maxint = sys.maxsize # per py3 docs for replacing maxint
132 _maxint = sys.maxsize # per py3 docs for replacing maxint
133 else:
133 else:
134 _maxint = sys.maxint
134 _maxint = sys.maxint
135 except (ImportError, AttributeError):
135 except (ImportError, AttributeError):
136 import inspect
136 import inspect
137 getargspec = inspect.getargspec
137 getargspec = inspect.getargspec
138 _byteskwargs = identity
138 _byteskwargs = identity
139 fsencode = identity # no py3 support
139 fsencode = identity # no py3 support
140 _maxint = sys.maxint # no py3 support
140 _maxint = sys.maxint # no py3 support
141 _sysstr = lambda x: x # no py3 support
141 _sysstr = lambda x: x # no py3 support
142 _xrange = xrange
142 _xrange = xrange
143
143
144 try:
144 try:
145 # 4.7+
145 # 4.7+
146 queue = pycompat.queue.Queue
146 queue = pycompat.queue.Queue
147 except (AttributeError, ImportError):
147 except (AttributeError, ImportError):
148 # <4.7.
148 # <4.7.
149 try:
149 try:
150 queue = pycompat.queue
150 queue = pycompat.queue
151 except (AttributeError, ImportError):
151 except (AttributeError, ImportError):
152 queue = util.queue
152 queue = util.queue
153
153
154 try:
154 try:
155 from mercurial import logcmdutil
155 from mercurial import logcmdutil
156 makelogtemplater = logcmdutil.maketemplater
156 makelogtemplater = logcmdutil.maketemplater
157 except (AttributeError, ImportError):
157 except (AttributeError, ImportError):
158 try:
158 try:
159 makelogtemplater = cmdutil.makelogtemplater
159 makelogtemplater = cmdutil.makelogtemplater
160 except (AttributeError, ImportError):
160 except (AttributeError, ImportError):
161 makelogtemplater = None
161 makelogtemplater = None
162
162
163 # for "historical portability":
163 # for "historical portability":
164 # define util.safehasattr forcibly, because util.safehasattr has been
164 # define util.safehasattr forcibly, because util.safehasattr has been
165 # available since 1.9.3 (or 94b200a11cf7)
165 # available since 1.9.3 (or 94b200a11cf7)
# sentinel distinguishing "attribute missing" from any real attribute value
_undefined = object()

def safehasattr(thing, attr):
    """Return True if `thing` has attribute `attr` (given as bytes).

    `attr` is passed through _sysstr() because getattr() requires a native
    string attribute name on Python 3.
    """
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined

# for "historical portability": install on util so code below can rely on
# util.safehasattr, which only became available in 1.9.3 (or 94b200a11cf7)
setattr(util, 'safehasattr', safehasattr)
170
170
171 # for "historical portability":
171 # for "historical portability":
172 # define util.timer forcibly, because util.timer has been available
172 # define util.timer forcibly, because util.timer has been available
173 # since ae5d60bb70c9
173 # since ae5d60bb70c9
174 if safehasattr(time, 'perf_counter'):
174 if safehasattr(time, 'perf_counter'):
175 util.timer = time.perf_counter
175 util.timer = time.perf_counter
176 elif os.name == b'nt':
176 elif os.name == b'nt':
177 util.timer = time.clock
177 util.timer = time.clock
178 else:
178 else:
179 util.timer = time.time
179 util.timer = time.time
180
180
181 # for "historical portability":
181 # for "historical portability":
182 # use locally defined empty option list, if formatteropts isn't
182 # use locally defined empty option list, if formatteropts isn't
183 # available, because commands.formatteropts has been available since
183 # available, because commands.formatteropts has been available since
184 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
184 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
185 # available since 2.2 (or ae5f92e154d3)
185 # available since 2.2 (or ae5f92e154d3)
186 formatteropts = getattr(cmdutil, "formatteropts",
186 formatteropts = getattr(cmdutil, "formatteropts",
187 getattr(commands, "formatteropts", []))
187 getattr(commands, "formatteropts", []))
188
188
189 # for "historical portability":
189 # for "historical portability":
190 # use locally defined option list, if debugrevlogopts isn't available,
190 # use locally defined option list, if debugrevlogopts isn't available,
191 # because commands.debugrevlogopts has been available since 3.7 (or
191 # because commands.debugrevlogopts has been available since 3.7 (or
192 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
192 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
193 # since 1.9 (or a79fea6b3e77).
193 # since 1.9 (or a79fea6b3e77).
194 revlogopts = getattr(cmdutil, "debugrevlogopts",
194 revlogopts = getattr(cmdutil, "debugrevlogopts",
195 getattr(commands, "debugrevlogopts", [
195 getattr(commands, "debugrevlogopts", [
196 (b'c', b'changelog', False, (b'open changelog')),
196 (b'c', b'changelog', False, (b'open changelog')),
197 (b'm', b'manifest', False, (b'open manifest')),
197 (b'm', b'manifest', False, (b'open manifest')),
198 (b'', b'dir', False, (b'open directory manifest')),
198 (b'', b'dir', False, (b'open directory manifest')),
199 ]))
199 ]))
200
200
201 cmdtable = {}
201 cmdtable = {}
202
202
203 # for "historical portability":
203 # for "historical portability":
204 # define parsealiases locally, because cmdutil.parsealiases has been
204 # define parsealiases locally, because cmdutil.parsealiases has been
205 # available since 1.5 (or 6252852b4332)
205 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a command spec like b"perf|p" into its list of aliases.

    Defined locally for "historical portability": cmdutil.parsealiases has
    only been available since 1.5 (or 6252852b4332).
    """
    return cmd.split(b"|")
208
208
# for "historical portability": pick the best available way to build the
# @command decorator, falling back through older Mercurial APIs
if safehasattr(registrar, 'command'):
    # modern API: registrar.command (since 3.7)
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because the "norepo" option has
        # only been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define the "@command" annotation locally, because cmdutil.command
    # has only been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
236
236
237 try:
237 try:
238 import mercurial.registrar
238 import mercurial.registrar
239 import mercurial.configitems
239 import mercurial.configitems
240 configtable = {}
240 configtable = {}
241 configitem = mercurial.registrar.configitem(configtable)
241 configitem = mercurial.registrar.configitem(configtable)
242 configitem(b'perf', b'presleep',
242 configitem(b'perf', b'presleep',
243 default=mercurial.configitems.dynamicdefault,
243 default=mercurial.configitems.dynamicdefault,
244 )
244 )
245 configitem(b'perf', b'stub',
245 configitem(b'perf', b'stub',
246 default=mercurial.configitems.dynamicdefault,
246 default=mercurial.configitems.dynamicdefault,
247 )
247 )
248 configitem(b'perf', b'parentscount',
248 configitem(b'perf', b'parentscount',
249 default=mercurial.configitems.dynamicdefault,
249 default=mercurial.configitems.dynamicdefault,
250 )
250 )
251 configitem(b'perf', b'all-timing',
251 configitem(b'perf', b'all-timing',
252 default=mercurial.configitems.dynamicdefault,
252 default=mercurial.configitems.dynamicdefault,
253 )
253 )
254 configitem(b'perf', b'pre-run',
254 configitem(b'perf', b'pre-run',
255 default=mercurial.configitems.dynamicdefault,
255 default=mercurial.configitems.dynamicdefault,
256 )
256 )
257 configitem(b'perf', b'profile-benchmark',
257 configitem(b'perf', b'profile-benchmark',
258 default=mercurial.configitems.dynamicdefault,
258 default=mercurial.configitems.dynamicdefault,
259 )
259 )
260 configitem(b'perf', b'run-limits',
260 configitem(b'perf', b'run-limits',
261 default=mercurial.configitems.dynamicdefault,
261 default=mercurial.configitems.dynamicdefault,
262 )
262 )
263 except (ImportError, AttributeError):
263 except (ImportError, AttributeError):
264 pass
264 pass
265
265
def getlen(ui):
    """Return the length function to use for benchmark sizing.

    When the experimental perf.stub config is set (used by the test suite),
    report every collection as length 1 so output is stable; otherwise
    return the builtin len.
    """
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len
270
270
class noop(object):
    """Dummy context manager: does nothing on enter or exit.

    Used in place of a real profiler context when profiling is disabled,
    so timing loops can unconditionally use `with`.
    """
    def __enter__(self):
        pass
    def __exit__(self, *args):
        pass

# shared do-nothing context instance (the class is stateless)
NOOPCTX = noop()
279
279
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands.
    """

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry is "<seconds>-<minimum-run-count>"; malformed entries are
    # warned about and skipped rather than aborting the benchmark
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n'
                     % item))
            continue
        try:
            time_limit = float(pycompat.sysstr(parts[0]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (pycompat.bytestr(e), item)))
            continue
        try:
            run_limit = int(pycompat.sysstr(parts[1]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (pycompat.bytestr(e), item)))
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # experimental config: perf.profile-benchmark — profile the first
    # benchmarked iteration when the profiling module is available
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    # experimental config: perf.pre-run — warm-up runs before measuring
    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
                          prerun=prerun, profiler=profiler)
    return t, fm
379
379
def stub_timer(fm, func, setup=None, title=None):
    """Run `func` exactly once, with optional `setup`, and time nothing.

    Substituted for _timer when perf.stub is set, so tests exercise the
    benchmark code without looping. `fm` and `title` are accepted only to
    mirror _timer's signature and are unused.
    """
    if setup is not None:
        setup()
    func()
384
384
@contextlib.contextmanager
def timeone():
    """Context manager timing one run of the enclosed block.

    Yields a list `r`; on exit a single (wallclock, user-cpu, system-cpu)
    tuple is appended to it. CPU times come from os.times() deltas,
    wallclock from util.timer().
    """
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
395
395
396
396
# list of stop conditions (elapsed time, minimal run count): a benchmark
# stops once any pair is satisfied — after 3s with at least 100 runs, or
# after 10s with at least 3 runs (see `perf.run-limits` in the module doc)
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
402
402
def _timer(fm, func, setup=None, title=None, displayall=False,
           limits=DEFAULTLIMITS, prerun=0, profiler=None):
    """Repeatedly run `func` (calling `setup` before each run) and report.

    Runs until one of `limits` — (elapsed-seconds, min-run-count) pairs —
    is met. The first measured iteration runs under `profiler` if one is
    given; results are emitted through `formatone(fm, ...)`.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # warm-up iterations: executed but not measured
    for i in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # only the first iteration is profiled
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)
435
435
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timings through formatter `fm`.

    `timings` is a list of (wall, user, sys) tuples. Always reports the
    best (fastest wallclock) run; with `displayall`, also reports max,
    average and median. `title` and `result` are echoed when given.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        # field names for non-best roles are prefixed, e.g. "avg.wall"
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    # tuples sort by wallclock first, so timings[0] is the fastest run
    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
467
467
468 # utilities for historical portability
468 # utilities for historical portability
469
469
def getint(ui, section, name, default):
    """Read config `section.name` as an int, returning `default` if unset.

    Defined locally for "historical portability": ui.configint has only
    been available since 1.9 (or fa2b596db182). Raises ConfigError when
    the configured value is not a valid integer.
    """
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, v))
481
481
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    # capture the current value so restore() can put it back later
    origvalue = getattr(obj, _sysstr(name))

    class attrutil(object):
        # small set/restore handle bound to obj/name via closure
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
511
511
512 # utilities to examine each internal API changes
512 # utilities to examine each internal API changes
513
513
def getbranchmapsubsettable():
    """Return the `subsettable` mapping wherever this Mercurial defines it.

    for "historical portability": subsettable is defined in:
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    - repoviewutil since 5.0
    """
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
530
530
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf);
    # older repos expose the same thing as 'sopener'
    storevfs = getattr(repo, 'svfs', None)
    if not storevfs:
        return getattr(repo, 'sopener')
    return storevfs
541
541
def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf);
    # older repos expose the same thing as 'opener'
    repovfs = getattr(repo, 'vfs', None)
    if not repovfs:
        return getattr(repo, 'opener')
    return repovfs
552
552
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    setter = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if setter: # since 1.4 (or 5614a628d173)
        return lambda: setter.set(None)

    cachesetter = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if cachesetter: # since 0.6 (or d7df759d0e97)
        return lambda: cachesetter.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
581
581
582 # utilities to clear cache
582 # utilities to clear cache
583
583
def clearfilecache(obj, attrname):
    """Drop a filecache'd attribute so the next access recomputes it.

    Works on the unfiltered view when *obj* is a filtered repo, and
    tolerates the attribute not being cached at all.
    """
    if getattr(obj, 'unfiltered', None) is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    # forget the filecache bookkeeping entry as well (no-op if absent)
    obj._filecache.pop(attrname, None)
591
591
def clearchangelog(repo):
    """Force the changelog to be reloaded on next access."""
    # on a filtered repo, also reset the filtered-changelog cache keys
    if repo is not repo.unfiltered():
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
597
597
598 # perf commands
598 # perf commands
599
599
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    # time a full dirstate.walk() over the working directory
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})
    timer(lambda: len(list(repo.dirstate.walk(matcher, subrepos=[],
                                              unknown=True, ignored=False))))
    fm.end()
608
608
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    # time a full annotate of file *f* at the working directory parent
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]
    timer(lambda: len(fctx.annotate(True)))
    fm.end()
616
616
@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    # time repo.status(); --unknown additionally scans for unknown files
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
628
628
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    # Benchmark a dry-run addremove over the whole working copy, with
    # ui output silenced so printing does not pollute the timing.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # read the original value *before* entering the try block, so the
    # finally clause can never hit a NameError if the read itself fails
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        # scmutil.addremove grew a 'uipathfn' parameter in newer
        # Mercurial; probe the signature for compatibility
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
646
646
def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
        return
    if util.safehasattr(cl, b'_nodecache'):
        # older revlog API: reset the node->rev cache by hand
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
655
655
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def setup():
        # drop lookup caches so every run recomputes from scratch
        clearcaches(cl)
    def run():
        len(cl.headrevs())
    timer(run, setup=setup)
    fm.end()
668
668
@command(b'perftags', formatteropts +
         [
             (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perftags(ui, repo, **opts):
    # time a full tags computation; --clear-revlogs also refreshes the
    # changelog and manifest before each run
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()
    def run():
        return len(repo.tags())
    timer(run, setup=setup)
    fm.end()
687
687
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    # time iterating all ancestors of the current changelog heads
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def run():
        for ancestor in repo.changelog.ancestors(heads):
            pass
    timer(run)
    fm.end()
698
698
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    # time membership tests of REVSET members against a lazy ancestor set
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def run():
        ancestorset = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in ancestorset  # the membership test is the measured work
    timer(run)
    fm.end()
711
711
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # convert kwargs keys to bytes like every other perf command does;
    # without this, gettimer()'s b'...' option lookups silently miss the
    # str-keyed kwargs on Python 3
    opts = _byteskwargs(opts)
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def setup():
        # reconnect before each run so connection setup isn't timed
        repos[1] = hg.peer(ui, opts, path)
    def run():
        setdiscovery.findcommonheads(ui, *repos)
    timer(run, setup=setup)
    fm.end()
726
726
@command(b'perfbookmarks', formatteropts +
         [
             (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')
    def run():
        repo._bookmarks  # property access triggers the parse
    timer(run, setup=setup)
    fm.end()
745
745
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # --- benchmark factories -------------------------------------------

    def makebench(fn):
        # open + parse the bundle, then hand it to fn
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)
        return run

    def makereadnbytes(size):
        # consume the bundle stream in fixed-size chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass
        return run

    def makestdioread(size):
        # raw file reads, no bundle parsing at all (I/O baseline)
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass
        return run

    # --- bundle1 payload consumers -------------------------------------

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # --- bundle2 payload consumers -------------------------------------

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass
        return run

    # baseline stdio reads are always measured
    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # sniff the bundle format once to pick the matching benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
863
863
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def run():
        state, chunks = bundler._generatechangelog(cl, nodes)
        # drain the generator; producing the chunks is the measured work
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(run)

    fm.end()
894
894
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    # time dirstate directory-map lookups, rebuilding the map each run
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # force initial dirstate load outside the timer
    def run():
        dirstate.hasdir(b'a')
        del dirstate._map._dirs  # drop the cache so next run recomputes
    timer(run)
    fm.end()
906
906
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    # time a full dirstate reload plus one lookup
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # warm up outside the timer
    def run():
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(run)
    fm.end()
917
917
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    # time hasdir() when the directory map must be rebuilt every run
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # warm up outside the timer
    def run():
        repo.dirstate.hasdir(b"a")
        del repo.dirstate._map._dirs
    timer(run)
    fm.end()
928
928
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    # time building the file fold map (case-folding lookup table)
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # warm up outside the timer
    def run():
        dirstate._map.filefoldmap.get(b'a')
        del dirstate._map.filefoldmap  # drop so next run rebuilds it
    timer(run)
    fm.end()
940
940
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    # time building the directory fold map; _dirs is dropped as well
    # since dirfoldmap is derived from it
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # warm up outside the timer
    def run():
        dirstate._map.dirfoldmap.get(b'a')
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(run)
    fm.end()
953
953
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    # time writing the dirstate back to disk
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds  # make sure the dirstate is loaded before timing
    def run():
        ds._dirty = True  # force a real write even with no changes
        ds.write(repo.currenttransaction())
    timer(run)
    fm.end()
965
965
@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    # time the merge action calculation between the working copy and REV
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def run():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(run)
    fm.end()
984
984
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def run():
        copies.pathcopies(ctx1, ctx2)
    timer(run)
    fm.end()
996
996
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
          ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def run():
        phases = _phases
        if full:
            # also measure re-reading the phase data from disk
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(run)
    fm.end()
1015
1015
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other

    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    # count how many remote draft roots are known locally
    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)

    def run():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(run)
    fm.end()
1071
1071
1072 @command(b'perfmanifest',[
1072 @command(b'perfmanifest',[
1073 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1073 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1074 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1074 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1075 ] + formatteropts, b'REV|NODE')
1075 ] + formatteropts, b'REV|NODE')
1076 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1076 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1077 """benchmark the time to read a manifest from disk and return a usable
1077 """benchmark the time to read a manifest from disk and return a usable
1078 dict-like object
1078 dict-like object
1079
1079
1080 Manifest caches are cleared before retrieval."""
1080 Manifest caches are cleared before retrieval."""
1081 opts = _byteskwargs(opts)
1081 opts = _byteskwargs(opts)
1082 timer, fm = gettimer(ui, opts)
1082 timer, fm = gettimer(ui, opts)
1083 if not manifest_rev:
1083 if not manifest_rev:
1084 ctx = scmutil.revsingle(repo, rev, rev)
1084 ctx = scmutil.revsingle(repo, rev, rev)
1085 t = ctx.manifestnode()
1085 t = ctx.manifestnode()
1086 else:
1086 else:
1087 from mercurial.node import bin
1087 from mercurial.node import bin
1088
1088
1089 if len(rev) == 40:
1089 if len(rev) == 40:
1090 t = bin(rev)
1090 t = bin(rev)
1091 else:
1091 else:
1092 try:
1092 try:
1093 rev = int(rev)
1093 rev = int(rev)
1094
1094
1095 if util.safehasattr(repo.manifestlog, b'getstorage'):
1095 if util.safehasattr(repo.manifestlog, b'getstorage'):
1096 t = repo.manifestlog.getstorage(b'').node(rev)
1096 t = repo.manifestlog.getstorage(b'').node(rev)
1097 else:
1097 else:
1098 t = repo.manifestlog._revlog.lookup(rev)
1098 t = repo.manifestlog._revlog.lookup(rev)
1099 except ValueError:
1099 except ValueError:
1100 raise error.Abort(b'manifest revision must be integer or full '
1100 raise error.Abort(b'manifest revision must be integer or full '
1101 b'node')
1101 b'node')
1102 def d():
1102 def d():
1103 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1103 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1104 repo.manifestlog[t].read()
1104 repo.manifestlog[t].read()
1105 timer(d)
1105 timer(d)
1106 fm.end()
1106 fm.end()
1107
1107
1108 @command(b'perfchangeset', formatteropts)
1108 @command(b'perfchangeset', formatteropts)
1109 def perfchangeset(ui, repo, rev, **opts):
1109 def perfchangeset(ui, repo, rev, **opts):
1110 opts = _byteskwargs(opts)
1110 opts = _byteskwargs(opts)
1111 timer, fm = gettimer(ui, opts)
1111 timer, fm = gettimer(ui, opts)
1112 n = scmutil.revsingle(repo, rev).node()
1112 n = scmutil.revsingle(repo, rev).node()
1113 def d():
1113 def d():
1114 repo.changelog.read(n)
1114 repo.changelog.read(n)
1115 #repo.changelog._cache = None
1115 #repo.changelog._cache = None
1116 timer(d)
1116 timer(d)
1117 fm.end()
1117 fm.end()
1118
1118
1119 @command(b'perfignore', formatteropts)
1119 @command(b'perfignore', formatteropts)
1120 def perfignore(ui, repo, **opts):
1120 def perfignore(ui, repo, **opts):
1121 """benchmark operation related to computing ignore"""
1121 """benchmark operation related to computing ignore"""
1122 opts = _byteskwargs(opts)
1122 opts = _byteskwargs(opts)
1123 timer, fm = gettimer(ui, opts)
1123 timer, fm = gettimer(ui, opts)
1124 dirstate = repo.dirstate
1124 dirstate = repo.dirstate
1125
1125
1126 def setupone():
1126 def setupone():
1127 dirstate.invalidate()
1127 dirstate.invalidate()
1128 clearfilecache(dirstate, b'_ignore')
1128 clearfilecache(dirstate, b'_ignore')
1129
1129
1130 def runone():
1130 def runone():
1131 dirstate._ignore
1131 dirstate._ignore
1132
1132
1133 timer(runone, setup=setupone, title=b"load")
1133 timer(runone, setup=setupone, title=b"load")
1134 fm.end()
1134 fm.end()
1135
1135
1136 @command(b'perfindex', [
1136 @command(b'perfindex', [
1137 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1137 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1138 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1138 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1139 ] + formatteropts)
1139 ] + formatteropts)
1140 def perfindex(ui, repo, **opts):
1140 def perfindex(ui, repo, **opts):
1141 """benchmark index creation time followed by a lookup
1141 """benchmark index creation time followed by a lookup
1142
1142
1143 The default is to look `tip` up. Depending on the index implementation,
1143 The default is to look `tip` up. Depending on the index implementation,
1144 the revision looked up can matters. For example, an implementation
1144 the revision looked up can matters. For example, an implementation
1145 scanning the index will have a faster lookup time for `--rev tip` than for
1145 scanning the index will have a faster lookup time for `--rev tip` than for
1146 `--rev 0`. The number of looked up revisions and their order can also
1146 `--rev 0`. The number of looked up revisions and their order can also
1147 matters.
1147 matters.
1148
1148
1149 Example of useful set to test:
1149 Example of useful set to test:
1150 * tip
1150 * tip
1151 * 0
1151 * 0
1152 * -10:
1152 * -10:
1153 * :10
1153 * :10
1154 * -10: + :10
1154 * -10: + :10
1155 * :10: + -10:
1155 * :10: + -10:
1156 * -10000:
1156 * -10000:
1157 * -10000: + 0
1157 * -10000: + 0
1158
1158
1159 It is not currently possible to check for lookup of a missing node. For
1159 It is not currently possible to check for lookup of a missing node. For
1160 deeper lookup benchmarking, checkout the `perfnodemap` command."""
1160 deeper lookup benchmarking, checkout the `perfnodemap` command."""
1161 import mercurial.revlog
1161 import mercurial.revlog
1162 opts = _byteskwargs(opts)
1162 opts = _byteskwargs(opts)
1163 timer, fm = gettimer(ui, opts)
1163 timer, fm = gettimer(ui, opts)
1164 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1164 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1165 if opts[b'no_lookup']:
1165 if opts[b'no_lookup']:
1166 if opts['rev']:
1166 if opts['rev']:
1167 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1167 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1168 nodes = []
1168 nodes = []
1169 elif not opts[b'rev']:
1169 elif not opts[b'rev']:
1170 nodes = [repo[b"tip"].node()]
1170 nodes = [repo[b"tip"].node()]
1171 else:
1171 else:
1172 revs = scmutil.revrange(repo, opts[b'rev'])
1172 revs = scmutil.revrange(repo, opts[b'rev'])
1173 cl = repo.changelog
1173 cl = repo.changelog
1174 nodes = [cl.node(r) for r in revs]
1174 nodes = [cl.node(r) for r in revs]
1175
1175
1176 unfi = repo.unfiltered()
1176 unfi = repo.unfiltered()
1177 # find the filecache func directly
1177 # find the filecache func directly
1178 # This avoid polluting the benchmark with the filecache logic
1178 # This avoid polluting the benchmark with the filecache logic
1179 makecl = unfi.__class__.changelog.func
1179 makecl = unfi.__class__.changelog.func
1180 def setup():
1180 def setup():
1181 # probably not necessary, but for good measure
1181 # probably not necessary, but for good measure
1182 clearchangelog(unfi)
1182 clearchangelog(unfi)
1183 def d():
1183 def d():
1184 cl = makecl(unfi)
1184 cl = makecl(unfi)
1185 for n in nodes:
1185 for n in nodes:
1186 cl.rev(n)
1186 cl.rev(n)
1187 timer(d, setup=setup)
1187 timer(d, setup=setup)
1188 fm.end()
1188 fm.end()
1189
1189
1190 @command(b'perfnodemap', [
1190 @command(b'perfnodemap', [
1191 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1191 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1192 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1192 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1193 ] + formatteropts)
1193 ] + formatteropts)
1194 def perfnodemap(ui, repo, **opts):
1194 def perfnodemap(ui, repo, **opts):
1195 """benchmark the time necessary to look up revision from a cold nodemap
1195 """benchmark the time necessary to look up revision from a cold nodemap
1196
1196
1197 Depending on the implementation, the amount and order of revision we look
1197 Depending on the implementation, the amount and order of revision we look
1198 up can varies. Example of useful set to test:
1198 up can varies. Example of useful set to test:
1199 * tip
1199 * tip
1200 * 0
1200 * 0
1201 * -10:
1201 * -10:
1202 * :10
1202 * :10
1203 * -10: + :10
1203 * -10: + :10
1204 * :10: + -10:
1204 * :10: + -10:
1205 * -10000:
1205 * -10000:
1206 * -10000: + 0
1206 * -10000: + 0
1207
1207
1208 The command currently focus on valid binary lookup. Benchmarking for
1208 The command currently focus on valid binary lookup. Benchmarking for
1209 hexlookup, prefix lookup and missing lookup would also be valuable.
1209 hexlookup, prefix lookup and missing lookup would also be valuable.
1210 """
1210 """
1211 import mercurial.revlog
1211 import mercurial.revlog
1212 opts = _byteskwargs(opts)
1212 opts = _byteskwargs(opts)
1213 timer, fm = gettimer(ui, opts)
1213 timer, fm = gettimer(ui, opts)
1214 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1214 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1215
1215
1216 unfi = repo.unfiltered()
1216 unfi = repo.unfiltered()
1217 clearcaches = opts['clear_caches']
1217 clearcaches = opts['clear_caches']
1218 # find the filecache func directly
1218 # find the filecache func directly
1219 # This avoid polluting the benchmark with the filecache logic
1219 # This avoid polluting the benchmark with the filecache logic
1220 makecl = unfi.__class__.changelog.func
1220 makecl = unfi.__class__.changelog.func
1221 if not opts[b'rev']:
1221 if not opts[b'rev']:
1222 raise error.Abort('use --rev to specify revisions to look up')
1222 raise error.Abort('use --rev to specify revisions to look up')
1223 revs = scmutil.revrange(repo, opts[b'rev'])
1223 revs = scmutil.revrange(repo, opts[b'rev'])
1224 cl = repo.changelog
1224 cl = repo.changelog
1225 nodes = [cl.node(r) for r in revs]
1225 nodes = [cl.node(r) for r in revs]
1226
1226
1227 # use a list to pass reference to a nodemap from one closure to the next
1227 # use a list to pass reference to a nodemap from one closure to the next
1228 nodeget = [None]
1228 nodeget = [None]
1229 def setnodeget():
1229 def setnodeget():
1230 # probably not necessary, but for good measure
1230 # probably not necessary, but for good measure
1231 clearchangelog(unfi)
1231 clearchangelog(unfi)
1232 nodeget[0] = makecl(unfi).nodemap.get
1232 nodeget[0] = makecl(unfi).nodemap.get
1233
1233
1234 def d():
1234 def d():
1235 get = nodeget[0]
1235 get = nodeget[0]
1236 for n in nodes:
1236 for n in nodes:
1237 get(n)
1237 get(n)
1238
1238
1239 setup = None
1239 setup = None
1240 if clearcaches:
1240 if clearcaches:
1241 def setup():
1241 def setup():
1242 setnodeget()
1242 setnodeget()
1243 else:
1243 else:
1244 setnodeget()
1244 setnodeget()
1245 d() # prewarm the data structure
1245 d() # prewarm the data structure
1246 timer(d, setup=setup)
1246 timer(d, setup=setup)
1247 fm.end()
1247 fm.end()
1248
1248
1249 @command(b'perfstartup', formatteropts)
1249 @command(b'perfstartup', formatteropts)
1250 def perfstartup(ui, repo, **opts):
1250 def perfstartup(ui, repo, **opts):
1251 opts = _byteskwargs(opts)
1251 opts = _byteskwargs(opts)
1252 timer, fm = gettimer(ui, opts)
1252 timer, fm = gettimer(ui, opts)
1253 def d():
1253 def d():
1254 if os.name != r'nt':
1254 if os.name != r'nt':
1255 os.system(b"HGRCPATH= %s version -q > /dev/null" %
1255 os.system(b"HGRCPATH= %s version -q > /dev/null" %
1256 fsencode(sys.argv[0]))
1256 fsencode(sys.argv[0]))
1257 else:
1257 else:
1258 os.environ[r'HGRCPATH'] = r' '
1258 os.environ[r'HGRCPATH'] = r' '
1259 os.system(r"%s version -q > NUL" % sys.argv[0])
1259 os.system(r"%s version -q > NUL" % sys.argv[0])
1260 timer(d)
1260 timer(d)
1261 fm.end()
1261 fm.end()
1262
1262
1263 @command(b'perfparents', formatteropts)
1263 @command(b'perfparents', formatteropts)
1264 def perfparents(ui, repo, **opts):
1264 def perfparents(ui, repo, **opts):
1265 """benchmark the time necessary to fetch one changeset's parents.
1265 """benchmark the time necessary to fetch one changeset's parents.
1266
1266
1267 The fetch is done using the `node identifier`, traversing all object layers
1267 The fetch is done using the `node identifier`, traversing all object layers
1268 from the repository object. The first N revisions will be used for this
1268 from the repository object. The first N revisions will be used for this
1269 benchmark. N is controlled by the ``perf.parentscount`` config option
1269 benchmark. N is controlled by the ``perf.parentscount`` config option
1270 (default: 1000).
1270 (default: 1000).
1271 """
1271 """
1272 opts = _byteskwargs(opts)
1272 opts = _byteskwargs(opts)
1273 timer, fm = gettimer(ui, opts)
1273 timer, fm = gettimer(ui, opts)
1274 # control the number of commits perfparents iterates over
1274 # control the number of commits perfparents iterates over
1275 # experimental config: perf.parentscount
1275 # experimental config: perf.parentscount
1276 count = getint(ui, b"perf", b"parentscount", 1000)
1276 count = getint(ui, b"perf", b"parentscount", 1000)
1277 if len(repo.changelog) < count:
1277 if len(repo.changelog) < count:
1278 raise error.Abort(b"repo needs %d commits for this test" % count)
1278 raise error.Abort(b"repo needs %d commits for this test" % count)
1279 repo = repo.unfiltered()
1279 repo = repo.unfiltered()
1280 nl = [repo.changelog.node(i) for i in _xrange(count)]
1280 nl = [repo.changelog.node(i) for i in _xrange(count)]
1281 def d():
1281 def d():
1282 for n in nl:
1282 for n in nl:
1283 repo.changelog.parents(n)
1283 repo.changelog.parents(n)
1284 timer(d)
1284 timer(d)
1285 fm.end()
1285 fm.end()
1286
1286
1287 @command(b'perfctxfiles', formatteropts)
1287 @command(b'perfctxfiles', formatteropts)
1288 def perfctxfiles(ui, repo, x, **opts):
1288 def perfctxfiles(ui, repo, x, **opts):
1289 opts = _byteskwargs(opts)
1289 opts = _byteskwargs(opts)
1290 x = int(x)
1290 x = int(x)
1291 timer, fm = gettimer(ui, opts)
1291 timer, fm = gettimer(ui, opts)
1292 def d():
1292 def d():
1293 len(repo[x].files())
1293 len(repo[x].files())
1294 timer(d)
1294 timer(d)
1295 fm.end()
1295 fm.end()
1296
1296
1297 @command(b'perfrawfiles', formatteropts)
1297 @command(b'perfrawfiles', formatteropts)
1298 def perfrawfiles(ui, repo, x, **opts):
1298 def perfrawfiles(ui, repo, x, **opts):
1299 opts = _byteskwargs(opts)
1299 opts = _byteskwargs(opts)
1300 x = int(x)
1300 x = int(x)
1301 timer, fm = gettimer(ui, opts)
1301 timer, fm = gettimer(ui, opts)
1302 cl = repo.changelog
1302 cl = repo.changelog
1303 def d():
1303 def d():
1304 len(cl.read(x)[3])
1304 len(cl.read(x)[3])
1305 timer(d)
1305 timer(d)
1306 fm.end()
1306 fm.end()
1307
1307
1308 @command(b'perflookup', formatteropts)
1308 @command(b'perflookup', formatteropts)
1309 def perflookup(ui, repo, rev, **opts):
1309 def perflookup(ui, repo, rev, **opts):
1310 opts = _byteskwargs(opts)
1310 opts = _byteskwargs(opts)
1311 timer, fm = gettimer(ui, opts)
1311 timer, fm = gettimer(ui, opts)
1312 timer(lambda: len(repo.lookup(rev)))
1312 timer(lambda: len(repo.lookup(rev)))
1313 fm.end()
1313 fm.end()
1314
1314
1315 @command(b'perflinelogedits',
1315 @command(b'perflinelogedits',
1316 [(b'n', b'edits', 10000, b'number of edits'),
1316 [(b'n', b'edits', 10000, b'number of edits'),
1317 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1317 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1318 ], norepo=True)
1318 ], norepo=True)
1319 def perflinelogedits(ui, **opts):
1319 def perflinelogedits(ui, **opts):
1320 from mercurial import linelog
1320 from mercurial import linelog
1321
1321
1322 opts = _byteskwargs(opts)
1322 opts = _byteskwargs(opts)
1323
1323
1324 edits = opts[b'edits']
1324 edits = opts[b'edits']
1325 maxhunklines = opts[b'max_hunk_lines']
1325 maxhunklines = opts[b'max_hunk_lines']
1326
1326
1327 maxb1 = 100000
1327 maxb1 = 100000
1328 random.seed(0)
1328 random.seed(0)
1329 randint = random.randint
1329 randint = random.randint
1330 currentlines = 0
1330 currentlines = 0
1331 arglist = []
1331 arglist = []
1332 for rev in _xrange(edits):
1332 for rev in _xrange(edits):
1333 a1 = randint(0, currentlines)
1333 a1 = randint(0, currentlines)
1334 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1334 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1335 b1 = randint(0, maxb1)
1335 b1 = randint(0, maxb1)
1336 b2 = randint(b1, b1 + maxhunklines)
1336 b2 = randint(b1, b1 + maxhunklines)
1337 currentlines += (b2 - b1) - (a2 - a1)
1337 currentlines += (b2 - b1) - (a2 - a1)
1338 arglist.append((rev, a1, a2, b1, b2))
1338 arglist.append((rev, a1, a2, b1, b2))
1339
1339
1340 def d():
1340 def d():
1341 ll = linelog.linelog()
1341 ll = linelog.linelog()
1342 for args in arglist:
1342 for args in arglist:
1343 ll.replacelines(*args)
1343 ll.replacelines(*args)
1344
1344
1345 timer, fm = gettimer(ui, opts)
1345 timer, fm = gettimer(ui, opts)
1346 timer(d)
1346 timer(d)
1347 fm.end()
1347 fm.end()
1348
1348
1349 @command(b'perfrevrange', formatteropts)
1349 @command(b'perfrevrange', formatteropts)
1350 def perfrevrange(ui, repo, *specs, **opts):
1350 def perfrevrange(ui, repo, *specs, **opts):
1351 opts = _byteskwargs(opts)
1351 opts = _byteskwargs(opts)
1352 timer, fm = gettimer(ui, opts)
1352 timer, fm = gettimer(ui, opts)
1353 revrange = scmutil.revrange
1353 revrange = scmutil.revrange
1354 timer(lambda: len(revrange(repo, specs)))
1354 timer(lambda: len(revrange(repo, specs)))
1355 fm.end()
1355 fm.end()
1356
1356
1357 @command(b'perfnodelookup', formatteropts)
1357 @command(b'perfnodelookup', formatteropts)
1358 def perfnodelookup(ui, repo, rev, **opts):
1358 def perfnodelookup(ui, repo, rev, **opts):
1359 opts = _byteskwargs(opts)
1359 opts = _byteskwargs(opts)
1360 timer, fm = gettimer(ui, opts)
1360 timer, fm = gettimer(ui, opts)
1361 import mercurial.revlog
1361 import mercurial.revlog
1362 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1362 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1363 n = scmutil.revsingle(repo, rev).node()
1363 n = scmutil.revsingle(repo, rev).node()
1364 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1364 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1365 def d():
1365 def d():
1366 cl.rev(n)
1366 cl.rev(n)
1367 clearcaches(cl)
1367 clearcaches(cl)
1368 timer(d)
1368 timer(d)
1369 fm.end()
1369 fm.end()
1370
1370
1371 @command(b'perflog',
1371 @command(b'perflog',
1372 [(b'', b'rename', False, b'ask log to follow renames')
1372 [(b'', b'rename', False, b'ask log to follow renames')
1373 ] + formatteropts)
1373 ] + formatteropts)
1374 def perflog(ui, repo, rev=None, **opts):
1374 def perflog(ui, repo, rev=None, **opts):
1375 opts = _byteskwargs(opts)
1375 opts = _byteskwargs(opts)
1376 if rev is None:
1376 if rev is None:
1377 rev=[]
1377 rev=[]
1378 timer, fm = gettimer(ui, opts)
1378 timer, fm = gettimer(ui, opts)
1379 ui.pushbuffer()
1379 ui.pushbuffer()
1380 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1380 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1381 copies=opts.get(b'rename')))
1381 copies=opts.get(b'rename')))
1382 ui.popbuffer()
1382 ui.popbuffer()
1383 fm.end()
1383 fm.end()
1384
1384
1385 @command(b'perfmoonwalk', formatteropts)
1385 @command(b'perfmoonwalk', formatteropts)
1386 def perfmoonwalk(ui, repo, **opts):
1386 def perfmoonwalk(ui, repo, **opts):
1387 """benchmark walking the changelog backwards
1387 """benchmark walking the changelog backwards
1388
1388
1389 This also loads the changelog data for each revision in the changelog.
1389 This also loads the changelog data for each revision in the changelog.
1390 """
1390 """
1391 opts = _byteskwargs(opts)
1391 opts = _byteskwargs(opts)
1392 timer, fm = gettimer(ui, opts)
1392 timer, fm = gettimer(ui, opts)
1393 def moonwalk():
1393 def moonwalk():
1394 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1394 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1395 ctx = repo[i]
1395 ctx = repo[i]
1396 ctx.branch() # read changelog data (in addition to the index)
1396 ctx.branch() # read changelog data (in addition to the index)
1397 timer(moonwalk)
1397 timer(moonwalk)
1398 fm.end()
1398 fm.end()
1399
1399
1400 @command(b'perftemplating',
1400 @command(b'perftemplating',
1401 [(b'r', b'rev', [], b'revisions to run the template on'),
1401 [(b'r', b'rev', [], b'revisions to run the template on'),
1402 ] + formatteropts)
1402 ] + formatteropts)
1403 def perftemplating(ui, repo, testedtemplate=None, **opts):
1403 def perftemplating(ui, repo, testedtemplate=None, **opts):
1404 """test the rendering time of a given template"""
1404 """test the rendering time of a given template"""
1405 if makelogtemplater is None:
1405 if makelogtemplater is None:
1406 raise error.Abort((b"perftemplating not available with this Mercurial"),
1406 raise error.Abort((b"perftemplating not available with this Mercurial"),
1407 hint=b"use 4.3 or later")
1407 hint=b"use 4.3 or later")
1408
1408
1409 opts = _byteskwargs(opts)
1409 opts = _byteskwargs(opts)
1410
1410
1411 nullui = ui.copy()
1411 nullui = ui.copy()
1412 nullui.fout = open(os.devnull, r'wb')
1412 nullui.fout = open(os.devnull, r'wb')
1413 nullui.disablepager()
1413 nullui.disablepager()
1414 revs = opts.get(b'rev')
1414 revs = opts.get(b'rev')
1415 if not revs:
1415 if not revs:
1416 revs = [b'all()']
1416 revs = [b'all()']
1417 revs = list(scmutil.revrange(repo, revs))
1417 revs = list(scmutil.revrange(repo, revs))
1418
1418
1419 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1419 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1420 b' {author|person}: {desc|firstline}\n')
1420 b' {author|person}: {desc|firstline}\n')
1421 if testedtemplate is None:
1421 if testedtemplate is None:
1422 testedtemplate = defaulttemplate
1422 testedtemplate = defaulttemplate
1423 displayer = makelogtemplater(nullui, repo, testedtemplate)
1423 displayer = makelogtemplater(nullui, repo, testedtemplate)
1424 def format():
1424 def format():
1425 for r in revs:
1425 for r in revs:
1426 ctx = repo[r]
1426 ctx = repo[r]
1427 displayer.show(ctx)
1427 displayer.show(ctx)
1428 displayer.flush(ctx)
1428 displayer.flush(ctx)
1429
1429
1430 timer, fm = gettimer(ui, opts)
1430 timer, fm = gettimer(ui, opts)
1431 timer(format)
1431 timer(format)
1432 fm.end()
1432 fm.end()
1433
1433
1434 @command(b'perfhelper-pathcopies', formatteropts +
1434 @command(b'perfhelper-pathcopies', formatteropts +
1435 [
1435 [
1436 (b'r', b'revs', [], b'restrict search to these revisions'),
1436 (b'r', b'revs', [], b'restrict search to these revisions'),
1437 (b'', b'timing', False, b'provides extra data (costly)'),
1437 (b'', b'timing', False, b'provides extra data (costly)'),
1438 ])
1438 ])
1439 def perfhelperpathcopies(ui, repo, revs=[], **opts):
1439 def perfhelperpathcopies(ui, repo, revs=[], **opts):
1440 """find statistic about potential parameters for the `perftracecopies`
1440 """find statistic about potential parameters for the `perftracecopies`
1441
1441
1442 This command find source-destination pair relevant for copytracing testing.
1442 This command find source-destination pair relevant for copytracing testing.
1443 It report value for some of the parameters that impact copy tracing time.
1443 It report value for some of the parameters that impact copy tracing time.
1444
1444
1445 If `--timing` is set, rename detection is run and the associated timing
1445 If `--timing` is set, rename detection is run and the associated timing
1446 will be reported. The extra details comes at the cost of a slower command
1446 will be reported. The extra details comes at the cost of a slower command
1447 execution.
1447 execution.
1448
1448
1449 Since the rename detection is only run once, other factors might easily
1449 Since the rename detection is only run once, other factors might easily
1450 affect the precision of the timing. However it should give a good
1450 affect the precision of the timing. However it should give a good
1451 approximation of which revision pairs are very costly.
1451 approximation of which revision pairs are very costly.
1452 """
1452 """
1453 opts = _byteskwargs(opts)
1453 opts = _byteskwargs(opts)
1454 fm = ui.formatter(b'perf', opts)
1454 fm = ui.formatter(b'perf', opts)
1455 dotiming = opts[b'timing']
1455 dotiming = opts[b'timing']
1456
1456
1457 if dotiming:
1457 if dotiming:
1458 header = '%12s %12s %12s %12s %12s %12s\n'
1458 header = '%12s %12s %12s %12s %12s %12s\n'
1459 output = ("%(source)12s %(destination)12s "
1459 output = ("%(source)12s %(destination)12s "
1460 "%(nbrevs)12d %(nbmissingfiles)12d "
1460 "%(nbrevs)12d %(nbmissingfiles)12d "
1461 "%(nbrenamedfiles)12d %(time)18.5f\n")
1461 "%(nbrenamedfiles)12d %(time)18.5f\n")
1462 header_names = ("source", "destination", "nb-revs", "nb-files",
1462 header_names = ("source", "destination", "nb-revs", "nb-files",
1463 "nb-renames", "time")
1463 "nb-renames", "time")
1464 fm.plain(header % header_names)
1464 fm.plain(header % header_names)
1465 else:
1465 else:
1466 header = '%12s %12s %12s %12s\n'
1466 header = '%12s %12s %12s %12s\n'
1467 output = ("%(source)12s %(destination)12s "
1467 output = ("%(source)12s %(destination)12s "
1468 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1468 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1469 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1469 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1470
1470
1471 if not revs:
1471 if not revs:
1472 revs = ['all()']
1472 revs = ['all()']
1473 revs = scmutil.revrange(repo, revs)
1473 revs = scmutil.revrange(repo, revs)
1474
1474
1475 roi = repo.revs('merge() and %ld', revs)
1475 roi = repo.revs('merge() and %ld', revs)
1476 for r in roi:
1476 for r in roi:
1477 ctx = repo[r]
1477 ctx = repo[r]
1478 p1 = ctx.p1().rev()
1478 p1 = ctx.p1().rev()
1479 p2 = ctx.p2().rev()
1479 p2 = ctx.p2().rev()
1480 bases = repo.changelog._commonancestorsheads(p1, p2)
1480 bases = repo.changelog._commonancestorsheads(p1, p2)
1481 for p in (p1, p2):
1481 for p in (p1, p2):
1482 for b in bases:
1482 for b in bases:
1483 base = repo[b]
1483 base = repo[b]
1484 parent = repo[p]
1484 parent = repo[p]
1485 missing = copies._computeforwardmissing(base, parent)
1485 missing = copies._computeforwardmissing(base, parent)
1486 if not missing:
1486 if not missing:
1487 continue
1487 continue
1488 data = {
1488 data = {
1489 b'source': base.hex(),
1489 b'source': base.hex(),
1490 b'destination': parent.hex(),
1490 b'destination': parent.hex(),
1491 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1491 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1492 b'nbmissingfiles': len(missing),
1492 b'nbmissingfiles': len(missing),
1493 }
1493 }
1494 if dotiming:
1494 if dotiming:
1495 begin = util.timer()
1495 begin = util.timer()
1496 renames = copies.pathcopies(base, parent)
1496 renames = copies.pathcopies(base, parent)
1497 end = util.timer()
1497 end = util.timer()
1498 # not very stable timing since we did only one run
1498 # not very stable timing since we did only one run
1499 data['time'] = end - begin
1499 data['time'] = end - begin
1500 data['nbrenamedfiles'] = len(renames)
1500 data['nbrenamedfiles'] = len(renames)
1501 fm.startitem()
1501 fm.startitem()
1502 fm.data(**data)
1502 fm.data(**data)
1503 out = data.copy()
1503 out = data.copy()
1504 out['source'] = fm.hexfunc(base.node())
1504 out['source'] = fm.hexfunc(base.node())
1505 out['destination'] = fm.hexfunc(parent.node())
1505 out['destination'] = fm.hexfunc(parent.node())
1506 fm.plain(output % out)
1506 fm.plain(output % out)
1507
1507
1508 fm.end()
1508 fm.end()
1509
1509
1510 @command(b'perfcca', formatteropts)
1510 @command(b'perfcca', formatteropts)
1511 def perfcca(ui, repo, **opts):
1511 def perfcca(ui, repo, **opts):
1512 opts = _byteskwargs(opts)
1512 opts = _byteskwargs(opts)
1513 timer, fm = gettimer(ui, opts)
1513 timer, fm = gettimer(ui, opts)
1514 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1514 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1515 fm.end()
1515 fm.end()
1516
1516
1517 @command(b'perffncacheload', formatteropts)
1517 @command(b'perffncacheload', formatteropts)
1518 def perffncacheload(ui, repo, **opts):
1518 def perffncacheload(ui, repo, **opts):
1519 opts = _byteskwargs(opts)
1519 opts = _byteskwargs(opts)
1520 timer, fm = gettimer(ui, opts)
1520 timer, fm = gettimer(ui, opts)
1521 s = repo.store
1521 s = repo.store
1522 def d():
1522 def d():
1523 s.fncache._load()
1523 s.fncache._load()
1524 timer(d)
1524 timer(d)
1525 fm.end()
1525 fm.end()
1526
1526
1527 @command(b'perffncachewrite', formatteropts)
1527 @command(b'perffncachewrite', formatteropts)
1528 def perffncachewrite(ui, repo, **opts):
1528 def perffncachewrite(ui, repo, **opts):
1529 opts = _byteskwargs(opts)
1529 opts = _byteskwargs(opts)
1530 timer, fm = gettimer(ui, opts)
1530 timer, fm = gettimer(ui, opts)
1531 s = repo.store
1531 s = repo.store
1532 lock = repo.lock()
1532 lock = repo.lock()
1533 s.fncache._load()
1533 s.fncache._load()
1534 tr = repo.transaction(b'perffncachewrite')
1534 tr = repo.transaction(b'perffncachewrite')
1535 tr.addbackup(b'fncache')
1535 tr.addbackup(b'fncache')
1536 def d():
1536 def d():
1537 s.fncache._dirty = True
1537 s.fncache._dirty = True
1538 s.fncache.write(tr)
1538 s.fncache.write(tr)
1539 timer(d)
1539 timer(d)
1540 tr.close()
1540 tr.close()
1541 lock.release()
1541 lock.release()
1542 fm.end()
1542 fm.end()
1543
1543
1544 @command(b'perffncacheencode', formatteropts)
1544 @command(b'perffncacheencode', formatteropts)
1545 def perffncacheencode(ui, repo, **opts):
1545 def perffncacheencode(ui, repo, **opts):
1546 opts = _byteskwargs(opts)
1546 opts = _byteskwargs(opts)
1547 timer, fm = gettimer(ui, opts)
1547 timer, fm = gettimer(ui, opts)
1548 s = repo.store
1548 s = repo.store
1549 s.fncache._load()
1549 s.fncache._load()
1550 def d():
1550 def d():
1551 for p in s.fncache.entries:
1551 for p in s.fncache.entries:
1552 s.encode(p)
1552 s.encode(p)
1553 timer(d)
1553 timer(d)
1554 fm.end()
1554 fm.end()
1555
1555
def _bdiffworker(q, blocks, xdiff, ready, done):
    """Worker loop used by the threaded mode of ``perfbdiff``.

    Pulls text pairs off queue ``q`` and diffs each one: with the xdiff
    algorithm when ``xdiff`` is set, block-level diffing when ``blocks``
    is set, and a plain text diff otherwise.  A ``None`` item marks the
    end of a batch; the worker then waits on the ``ready`` condition
    until the coordinator wakes it, and stops once the ``done`` event
    has been set.
    """
    while not done.is_set():
        pair = q.get()
        # Inner loop: process one batch until the None sentinel.
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        # Batch finished; sleep until the coordinator notifies that more
        # work (or shutdown, signalled via ``done``) is available.
        with ready:
            ready.wait()
1571
1571
def _manifestrevision(repo, mnode):
    """Return the raw manifest text for manifest node ``mnode``.

    Handles both modern repositories (``manifestlog.getstorage``) and
    older ones that only expose ``manifestlog._revlog``.
    """
    mlog = repo.manifestlog

    if util.safehasattr(mlog, b'getstorage'):
        return mlog.getstorage(b'').revision(mnode)
    return mlog._revlog.revision(mnode)
1581
1581
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    # --xdiff only selects the algorithm used by --blocks.
    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    # In changelog/manifest mode the positional argument is the revision,
    # not a file.
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # Collect all text pairs up front so only the diffing is timed.
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        # Threaded mode: start the workers (see _bdiffworker) before
        # timing so thread creation is not part of the measurement.
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            # One None sentinel per worker ends each worker's batch.
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # Shut the workers down: set the exit flag, then wake them.
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1682
1682
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    # In changelog/manifest mode the positional argument is the revision,
    # not a file.
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # Gather all (old, new) text pairs up front so that only the diffing
    # itself is measured.
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1748
1748
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Map of diff flag letter -> commands.diff() keyword argument name.
    flagnames = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # Benchmark each whitespace-option combination separately.
    for flags in ('', 'w', 'b', 'B', 'wB'):
        diffkwargs = {flagnames[letter]: b'1' for letter in flags}

        def rundiff():
            ui.pushbuffer()
            commands.diff(ui, repo, **diffkwargs)
            ui.popbuffer()

        flagbytes = flags.encode('ascii')
        if flagbytes:
            title = b'diffopts: %s' % (b'-' + flagbytes)
        else:
            title = b'diffopts: none'
        timer(rundiff, title=title)
    fm.end()
1770
1770
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # The first 4 bytes of a revlog hold flags (high 16 bits) and the
    # format version (low 16 bits).
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        # Bit 16 of the header flags marks an inline revlog (data stored
        # alongside the index).
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    # Sample nodes at fixed fractions of the revlog so lookup cost can be
    # compared across positions.
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        # Instantiating a revlog from the already-open opener.
        revlog.revlog(opener, indexfile)

    def read():
        # Raw read of the index file, no parsing.
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        # parseindex() returns (index, nodemap); [0] is the index.
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        # parseindex() returns (index, nodemap); [1] is the nodemap.
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    # (callable, display title) pairs; each is timed independently below.
    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1888
1888
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # Negative start revisions count back from the tip.
    if startrev < 0:
        startrev += rllen

    def readrevs():
        rl.clearcaches()

        step = opts[b'dist']
        if reverse:
            first, last = rllen - 1, startrev - 1
            step = -step
        else:
            first, last = startrev, rllen

        for rev in _xrange(first, last, step):
            # Old revisions don't support passing int.
            node = rl.node(rev)
            rl.revision(node)

    timer, fm = gettimer(ui, opts)
    timer(readrevs)
    fm.end()
1930
1930
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          (b'', b'count', 3, b'last revision to write'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
          ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Runs the rebuild ``--count`` times (via _timeonewrite), then reports
    per-revision percentile timings and the total time.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # Negative revisions count back from the tip.
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # fixed: error message previously read "invalide run count"
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    # results: [(rev, [timing-from-run-1, timing-from-run-2, ...]), ...]
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            # every run must cover the same revisions in the same order
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # fixed: previously computed with `* 70`, which displayed the
        # 70th percentile under the "50%" label
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
2038
2038
class _faketr(object):
    """Minimal transaction stand-in whose ``add`` accepts and discards
    everything — just enough for ``addrawrevision`` during benchmarks."""

    def add(self, x, y, z=None):
        # Deliberately a no-op.
        return None
2042
2042
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    """Re-add revisions [startrev, stoprev] of ``orig`` into a temporary
    revlog, timing each ``addrawrevision`` call.

    ``source`` selects the revision seed strategy (see _getrevisionseed).
    Returns a list of ``(rev, timing)`` pairs.
    """
    results = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            updateprogress = progress.update
            completeprogress = progress.complete
        else:
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for pos, rev in enumerate(revs):
            updateprogress(pos)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # start every sample from a cold index/revlog cache
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as timing:
                dest.addrawrevision(*addargs, **addkwargs)
            results.append((rev, timing[0]))
        updateprogress(total)
        completeprogress()
    return results
2079
2079
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``(args, kwargs)`` pair fed to ``addrawrevision`` for ``rev``.

    ``source`` picks where the revision content comes from: the full text
    (b'full') or a cached delta against parent 1, parent 2, the parent
    giving the smallest diff, or the delta base used in storage.
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to p1 when there is no second parent
        parent = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(parent), orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # delta against whichever parent yields the shorter diff
        parent, diff = p1, orig.revdiff(p1, rev)
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(diff) > len(p2diff):
                parent, diff = p2, p2diff
        cachedelta = (orig.rev(parent), diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2118
2118
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a temporary copy of revlog ``orig`` truncated before
    ``truncaterev``, so those revisions can be re-added by a benchmark.

    The copy lives in a throwaway directory that is removed on exit.
    Inline revlogs are not supported.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')

    srcindexpath = orig.opener.join(orig.indexfile)
    srcdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(srcindexpath, destindexpath)
        shutil.copyfile(srcdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
2165
2165
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    if util.safehasattr(rl, '_getsegmentforrevs'):
        segmentforrevs = rl._getsegmentforrevs
    else:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            if engine not in util.compressionengines:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # default: every available engine that supports revlog compression
        engines = []
        for name in util.compengines:
            engine = util.compengines[name]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(name)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # raw file handle backing the revlog data
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        return getsvfs(repo)(rl.datafile)

    def doread():
        # one segment read per revision, fresh file handle each time
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # one segment read per revision over a single reused handle
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # a single segment read spanning all revisions
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        # read + decompress, one revision at a time
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (doread, b'read'),
        (doreadcachedfh, b'read w/ reused fd'),
        (doreadbatch, b'read batch'),
        (doreadbatchcachedfh, b'read batch w/ reused fd'),
        (dochunk, b'chunk'),
        (dochunkbatch, b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2283
2283
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the positional argument is the revision, not a file
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    if util.safehasattr(r, '_getsegmentforrevs'):
        segmentforrevs = r._getsegmentforrevs
    else:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # hoist attribute lookups out of the inner loop
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for segidx, seg in enumerate(chain):
            offset = start(seg[0])
            raw = data[segidx]
            for rev in seg:
                chunkstart = start(rev)
                if inline:
                    # skip the interleaved index entries
                    chunkstart += (rev + 1) * iosize
                ladd(buffer(raw, chunkstart - offset, length(rev)))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for seg in slicedchain:
            segmentforrevs(seg[0], seg[-1])

    def doslice(r, chain, size):
        # drain the slicing generator; the slicing itself is what we time
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        # older layout
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute the inputs of every phase once, outside the timed calls
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (dorevision, b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        benches.append((lambda: doslice(r, chain, size),
                        b'slice-sparse-chain'))

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2419
2419
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building
    the volatile revisions set cache on the revset execution. Volatile
    caches hold filtered and obsolete related caches.
    """
    # NOTE: the help text previously referred to a nonexistent ``--clean``
    # option; the declared option is ``-C/--clear``.
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            # obtaining a changectx per revision is the extra cost measured
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
2442
2442
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def makebench(compute):
        """Build a bench factory that recomputes ``compute(repo, name)``
        from a cold volatile-set (and optionally obsstore) state."""
        def bench(name):
            def d():
                repo.invalidatevolatilesets()
                if opts[b'clear_obsstore']:
                    clearfilecache(repo, b'obsstore')
                compute(repo, name)
            return d
        return bench

    getobs = makebench(obsolete.getrevs)
    getfiltered = makebench(repoview.filterrevs)

    allobs = sorted(obsolete.cachefuncs)
    if names:
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()
2484
2484
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
          ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        view = repo if filtername is None else repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                view._branchcaches.clear()
            else:
                filtered.pop(filtername, None)
            view.branchmap()
        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset is not itself still pending
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk reads and writes so only in-memory work is measured
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = b'unfiltered' if name is None else name
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2563
2563
@command(b'perfbranchmapupdate', [
     (b'', b'base', [], b'subset of revision to start from'),
     (b'', b'target', [], b'subset of revision to end with'),
     (b'', b'clear-caches', False, b'clear cache between each runs')
    ] + formatteropts)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closures

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # revisions the benchmarked update() call will have to process
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    # build two repoview filters exposing exactly the base and target subsets
    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # register the temporary filters; removed again in the finally block
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset where found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # start each run from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2667
2667
@command(b'perfbranchmapload', [
     (b'f', b'filter', b'', b'Specify repoview filter'),
     (b'', b'list', False, b'List branchmap filter caches'),
     (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),

    ] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap

    With `--list`, only print the on-disk branchmap cache files and their
    sizes instead of benchmarking. With `--clear-revlogs`, the changelog is
    refreshed before each timed read.
    """
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # just enumerate cached branchmap files ("branch2*" in the cache vfs)
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap() # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        fromfile(repo)
    timer(bench, setup=setup)
    fm.end()
2718
2718
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    storevfs = getsvfs(repo)

    def countmarkers():
        # instantiating obsstore parses all markers from the store vfs
        return len(obsolete.obsstore(storevfs))

    timer(countmarkers)
    fm.end()
2728
2728
@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
     (b'', b'mincost', 0, b'smallest cost of items in cache'),
     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
     (b'', b'size', 4, b'size of cache'),
     (b'', b'gets', 10000, b'number of key lookups'),
     (b'', b'sets', 10000, b'number of key sets'),
     (b'', b'mixed', 10000, b'number of mixed mode operations'),
     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    """benchmark ``util.lrucachedict`` operations

    Runs a series of micro-benchmarks (init, gets, inserts, sets, mixed)
    against an LRU cache dict of ``size`` entries.  When ``--costlimit`` is
    non-zero, the cost-aware variants are benchmarked instead, with per-item
    costs drawn uniformly from [mincost, maxcost].
    """
    opts = _byteskwargs(opts)

    def doinit():
        # raw construction cost of the cache object
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    # random keys used to populate the cache; _maxint keeps the values in
    # the platform's native int range
    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        # cost-limited variant: inserts may evict, so lookups can miss
        # (``costs`` is populated below, before any benchmark runs)
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        # __setitem__ path, as opposed to the explicit insert() API above
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0  # lookup
        else:
            op = 1  # store

        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # cost-aware and plain benchmarks are mutually exclusive
    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2859
2859
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def write():
        # bind the method once so the loop measures write(), not lookups
        uiwrite = ui.write
        for _ in range(100000):
            uiwrite(b'Testing write performance\n')

    timer(write)
    fm.end()
2872
2872
def uisetup(ui):
    """extension setup hook

    On old Mercurial versions, wrap ``cmdutil.openrevlog`` so that passing
    ``--dir`` on a version without directory-manifest support aborts with a
    clear message instead of failing obscurely.
    """
    if (util.safehasattr(cmdutil, b'openrevlog') and
        not util.safehasattr(commands, b'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(b"This version doesn't support --dir option",
                                  hint=b"use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2887
2887
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def advance():
        # drive a full progress bar from 0 to ``total`` by single increments
        with ui.makeprogress(topic, total=total) as progress:
            for _ in pycompat.xrange(total):
                progress.increment()

    timer(advance)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now