# Source: Mercurial contrib/perf.py at r43052:777a9df5 (default branch).
# Commit: "perf: don't try to call `util.queue` on Mercurial versions before
# it existed" by Martin von Zweigbergk.
# perf.py - performance test routines
'''helper extension to measure performance

Configurations
==============

``perf``
--------

``all-timing``
  When set, additional statistics will be reported for each benchmark: best,
  worst, median average. If not set only the best timing is reported
  (default: off).

``presleep``
  number of seconds to wait before any group of runs (default: 1)

``pre-run``
  number of runs to perform before starting measurement.

``profile-benchmark``
  Enable profiling for the benchmarked section.
  (The first iteration is benchmarked)

``run-limits``
  Control the number of runs each benchmark will perform. The option value
  should be a list of `<time>-<numberofrun>` pairs. After each run the
  conditions are considered in order with the following logic:

  If the benchmark has been running for <time> seconds, and we have performed
  <numberofrun> iterations, stop the benchmark.

  The default value is: `3.0-100, 10.0-3`

``stub``
  When set, benchmarks will only be run once, useful for testing
  (default: off)
'''
39
39
# "historical portability" policy of perf.py:
#
# We have to do:
# - make perf.py "loadable" with as wide a Mercurial version range as possible
#   This doesn't mean that perf commands work correctly with that Mercurial.
#   BTW, perf.py itself has been available since 1.1 (or eb240755386d).
# - make historical perf commands work correctly with as wide a Mercurial
#   version range as possible
#
# We have to do, if possible with reasonable cost:
# - make recent perf commands for historical features work correctly
#   with early Mercurial
#
# We don't have to do:
# - make perf commands for recent features work correctly with early
#   Mercurial
from __future__ import absolute_import
import contextlib
import functools
import gc
import os
import random
import shutil
import struct
import sys
import tempfile
import threading
import time
from mercurial import (
    changegroup,
    cmdutil,
    commands,
    copies,
    error,
    extensions,
    hg,
    mdiff,
    merge,
    revlog,
    util,
)
82
82
# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap  # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete  # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import registrar  # since 3.7 (or 37d50250b696)
    dir(registrar)  # forcibly load it
except ImportError:
    registrar = None
try:
    from mercurial import repoview  # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
    from mercurial.utils import repoviewutil  # since 5.0
except ImportError:
    repoviewutil = None
try:
    from mercurial import scmutil  # since 1.9 (or 8b252e826c68)
except ImportError:
    pass
try:
    from mercurial import setdiscovery  # since 1.9 (or cb98fed52495)
except ImportError:
    pass

try:
    from mercurial import profiling
except ImportError:
    # profiling is optional; gettimer() falls back to no profiling
    profiling = None
120
120
def identity(a):
    """Return `a` unchanged; used as a no-op compatibility shim."""
    return a
123
123
# for "historical portability": bind pycompat helpers when available,
# otherwise fall back to py2-only equivalents.
try:
    from mercurial import pycompat
    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (NameError, ImportError, AttributeError):
    # fallback path: only reachable on Python 2 (uses sys.maxint / xrange)
    import inspect
    getargspec = inspect.getargspec
    _byteskwargs = identity
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange
143
143
# for "historical portability": locate a Queue class across versions.
try:
    # 4.7+
    queue = pycompat.queue.Queue
except (NameError, AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (NameError, AttributeError, ImportError):
        # Very old Mercurial: pycompat doesn't exist at all, and
        # util.queue never existed this early either — go straight to
        # the Python 2 stdlib module.
        import Queue as queue
153
153
# for "historical portability": find the log templater factory, which has
# moved between modules across versions; None when unavailable.
try:
    from mercurial import logcmdutil
    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None
162
162
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()
def safehasattr(thing, attr):
    """Return True if `thing` has attribute `attr` (a bytes name)."""
    # sentinel avoids the exception-swallowing pitfall of hasattr on py2
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)
170
170
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    # NOTE(review): os.name is a native str, so this bytes comparison can
    # only match on Python 2 (where str == bytes); on Python 3
    # time.perf_counter exists, making this branch unreachable there.
    util.timer = time.clock
else:
    util.timer = time.time
180
180
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
                         (b'c', b'changelog', False, (b'open changelog')),
                         (b'm', b'manifest', False, (b'open manifest')),
                         (b'', b'dir', False, (b'open directory manifest')),
                     ]))
200
200
# table populated by the @command decorator below
cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a b'name|alias1|alias2' command spec into a list of names."""
    return cmd.split(b"|")
208
208
# for "historical portability": pick (or synthesize) a @command decorator.
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
236
236
# Register the experimental perf.* config items when the registrar
# machinery exists; silently skip on versions that predate it.
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
               experimental=True,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
               experimental=True,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
               experimental=True,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
               experimental=True,
    )
    configitem(b'perf', b'pre-run',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'profile-benchmark',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'run-limits',
               default=mercurial.configitems.dynamicdefault,
               experimental=True,
    )
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    # (older configitem() doesn't accept the `experimental` keyword)
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'pre-run',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'profile-benchmark',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'run-limits',
               default=mercurial.configitems.dynamicdefault,
    )
294
294
def getlen(ui):
    """Return a length function; a constant-1 stub when perf.stub is set."""
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len
299
299
class noop(object):
    """dummy context manager: does nothing on enter or exit"""
    def __enter__(self):
        pass
    def __exit__(self, *args):
        pass

# shared do-nothing context, used where a profiler may be absent
NOOPCTX = noop()
308
308
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n'
                     % item))
            continue
        try:
            time_limit = float(pycompat.sysstr(parts[0]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (pycompat.bytestr(e), item)))
            continue
        try:
            run_limit = int(pycompat.sysstr(parts[1]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (pycompat.bytestr(e), item)))
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
                          prerun=prerun, profiler=profiler)
    return t, fm
408
408
def stub_timer(fm, func, setup=None, title=None):
    """Run `setup` (if any) then `func` exactly once; no timing reported."""
    if setup is not None:
        setup()
    func()
413
413
@contextlib.contextmanager
def timeone():
    """Time the managed block; yields a list that receives one
    (wall, user, sys) tuple after the block finishes."""
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    # os.times()[0]/[1] are user/system CPU time respectively
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
424
424
425
425
# list of stop conditions (elapsed time, minimal run count)
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)

def _timer(fm, func, setup=None, title=None, displayall=False,
           limits=DEFAULTLIMITS, prerun=0, profiler=None):
    """Repeatedly run `func` (with optional per-run `setup`), collecting
    timings until a (time, count) limit is met, then format results."""
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # warm-up runs, not measured
    for i in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # only the first measured iteration is profiled
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)
464
464
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timing summary through formatter `fm`.

    `timings` is a list of (wall, user, sys) tuples; it is sorted in
    place. Only the best entry is shown unless `displayall` is set, in
    which case max/avg/median are reported as well."""

    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)
    def display(role, entry):
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')
    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
496
496
497 # utilities for historical portability
497 # utilities for historical portability
498
498
def getint(ui, section, name, default):
    """Read config `section.name` as an int, returning `default` when unset.

    Raises error.ConfigError when the value is present but not an integer."""
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, v))
510
510
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        # set/restore pair closing over obj, name and the original value
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
540
540
541 # utilities to examine each internal API changes
541 # utilities to examine each internal API changes
542
542
def getbranchmapsubsettable():
    """Return the branch-cache 'subsettable' mapping from wherever it lives.

    Probes the candidate modules in newest-first order and aborts when
    none of them carries the attribute (pre-2.5 Mercurial).
    """
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
559
559
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store.

    For "historical portability": repo.svfs has only been available
    since 2.3 (7034365089bf); older repos expose the equivalent object
    as repo.sopener.
    """
    # falsy svfs (including None) falls through to the legacy attribute,
    # which raises AttributeError if absent — same as the explicit branch
    return getattr(repo, 'svfs', None) or getattr(repo, 'sopener')
570
570
def getvfs(repo):
    """Return appropriate object to access files under .hg.

    For "historical portability": repo.vfs has only been available
    since 2.3 (7034365089bf); older repos expose the equivalent object
    as repo.opener.
    """
    # falsy vfs (including None) falls through to the legacy attribute,
    # which raises AttributeError if absent — same as the explicit branch
    return getattr(repo, 'vfs', None) or getattr(repo, 'opener')
581
581
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    # older APIs: a plain attribute we can reset to None via safeattrsetter
    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
610
610
611 # utilities to clear cache
611 # utilities to clear cache
612
612
def clearfilecache(obj, attrname):
    """Drop a @filecache-style cached attribute from *obj*.

    If *obj* knows how to produce an unfiltered view of itself (repos
    do), operate on that view; otherwise operate on *obj* directly.
    """
    if getattr(obj, 'unfiltered', None) is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    # forget the filecache bookkeeping entry as well, if present
    obj._filecache.pop(attrname, None)
620
620
def clearchangelog(repo):
    """Force the changelog to be re-read from disk on next access."""
    # on a filtered view, also wipe the per-view changelog cache slots
    # (bypass __setattr__ because these repos forbid direct assignment)
    if repo is not repo.unfiltered():
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
626
626
627 # perf commands
627 # perf commands
628
628
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark a full dirstate walk matching the given patterns"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    # materialize the walk so every entry is actually produced
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()
637
637
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file ``f`` at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()
645
645
@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call"""
    opts = _byteskwargs(opts)
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    # sum the sizes of every status category so all lists are consumed
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
657
657
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the whole working copy"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # capture the old value BEFORE entering the try block: the finally
    # clause reads `oldquiet`, so it must already be bound even if the
    # very first statement inside the try raised
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        # scmutil.addremove grew a `uipathfn` argument in 5.1; pick the
        # matching call signature for the running Mercurial
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
675
675
def clearcaches(cl):
    """Clear a changelog's lookup caches across internal API versions."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # older revlogs: reset the node->rev cache by hand
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
684
684
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def s():
        # setup: drop caches so each run recomputes from scratch
        clearcaches(cl)
    def d():
        len(cl.headrevs())
    timer(d, setup=s)
    fm.end()
697
697
@command(b'perftags', formatteropts+
        [
            (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
        ])
def perftags(ui, repo, **opts):
    """benchmark computing the full set of repository tags"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def s():
        # setup: optionally re-read revlogs, always drop the tags cache
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()
    def t():
        return len(repo.tags())
    timer(t, setup=s)
    fm.end()
716
716
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating over all ancestors of the changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        # exhaust the lazy ancestor iterator
        for a in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()
727
727
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark ancestor-membership tests for every rev in ``revset``"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            # membership test only; the result is deliberately discarded
            rev in s
    timer(d)
    fm.end()
740
740
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # NOTE(review): unlike sibling commands, opts is NOT run through
    # _byteskwargs here before being passed to gettimer/hg.peer — confirm
    # whether that is intentional
    # list cell so the setup closure can stash the freshly-opened peer
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # setup: (re)open the peer so connection cost is not measured
        repos[1] = hg.peer(ui, opts, path)
    def d():
        setdiscovery.findcommonheads(ui, *repos)
    timer(d, setup=s)
    fm.end()
755
755
@command(b'perfbookmarks', formatteropts +
        [
            (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
        ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def s():
        # setup: drop cached state so each run re-parses from disk
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')
    def d():
        # property access triggers the parse being measured
        repo._bookmarks
    timer(d, setup=s)
    fm.end()
774
774
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    # imported lazily: these modules are not needed by the other commands
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        # wrap fn so each run re-opens and re-parses the bundle header
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # read the whole bundle payload in `size`-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # raw file reads, bypassing bundle parsing entirely (baseline)
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        # seek to the end of every part, exercising the seek machinery
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        # read every part's payload in `size`-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    # baseline raw-read benchmarks, run for every bundle type
    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # sniff the bundle type once to pick the matching benchmark set
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
892
892
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # drain the chunk generator so the work actually happens
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
923
923
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark computing the dirstate's directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate so file loading isn't part of the measurement
    b'a' in dirstate
    def d():
        dirstate.hasdir(b'a')
        # drop the cached dir map so the next run rebuilds it
        del dirstate._map._dirs
    timer(d)
    fm.end()
935
935
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark loading the dirstate from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime once so the first timed run matches the others
    b"a" in repo.dirstate
    def d():
        # invalidate forces a re-read; the lookup triggers it
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(d)
    fm.end()
946
946
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark rebuilding the dirstate directory map via hasdir"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate so file loading isn't part of the measurement
    b"a" in repo.dirstate
    def d():
        repo.dirstate.hasdir(b"a")
        # drop the cached dir map so the next run rebuilds it
        del repo.dirstate._map._dirs
    timer(d)
    fm.end()
957
957
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark building the dirstate's case-folding map for files"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate so file loading isn't part of the measurement
    b'a' in dirstate
    def d():
        dirstate._map.filefoldmap.get(b'a')
        # drop the cached map so the next run rebuilds it
        del dirstate._map.filefoldmap
    timer(d)
    fm.end()
969
969
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark building the dirstate's case-folding map for directories"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate so file loading isn't part of the measurement
    b'a' in dirstate
    def d():
        dirstate._map.dirfoldmap.get(b'a')
        # drop both cached maps (dirfoldmap is derived from _dirs)
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(d)
    fm.end()
982
982
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark writing the dirstate back to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # prime the dirstate so file loading isn't part of the measurement
    b"a" in ds
    def d():
        # mark dirty so write() actually serializes instead of no-op'ing
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()
994
994
def _getmergerevs(repo, opts):
    """parse command argument to return rev involved in merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        # no explicit base: use the common ancestor of the two sides
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1016
1016
@command(b'perfmergecalculate',
         [
             (b'r', b'rev', b'.', b'rev to merge against'),
             (b'', b'from', b'', b'rev to merge from'),
             (b'', b'base', b'', b'the revision to use as base'),
         ] + formatteropts)
def perfmergecalculate(ui, repo, **opts):
    """benchmark the merge action-calculation phase"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()
1035
1035
@command(b'perfmergecopies',
         [
             (b'r', b'rev', b'.', b'rev to merge against'),
             (b'', b'from', b'', b'rev to merge from'),
             (b'', b'base', b'', b'the revision to use as base'),
         ] + formatteropts)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)
    timer(d)
    fm.end()
1053
1053
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve both revisions up front so only pathcopies itself is timed
    source = scmutil.revsingle(repo, rev1, rev1)
    target = scmutil.revsingle(repo, rev2, rev2)
    timer(lambda: copies.pathcopies(source, target))
    fm.end()
1065
1065
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
          ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def d():
        phases = _phases
        if full:
            # drop the cached object entirely so re-reading the on-disk
            # phase data is included in the timed run
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        # force the phase revision sets to be recomputed from scratch
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()
1084
1084
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    # imported locally so the extension still loads on Mercurial versions
    # where these modules have a different layout
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    # fetch the remote phase roots through the listkeys protocol
    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    # count how many remote non-public roots are known locally; this gives
    # an idea of how much work the timed summary below has to do
    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
1140
1140
@command(b'perfmanifest',[
         (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
         (b'', b'clear-disk', False, b'clear on-disk caches too'),
         ] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset; resolve it and use its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        # the argument refers directly to the manifest revlog, either as a
        # full 40-char hex node or as an integer revision
        from mercurial.node import bin

        if len(rev) == 40:
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # getstorage() only exists on newer Mercurial; fall back to
                # the private _revlog attribute on older versions
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        # clear in-memory (and optionally on-disk) caches so each run times
        # a cold manifest read
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
1176
1176
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changeset entry from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()
    timer(lambda: repo.changelog.read(node))
    fm.end()
1187
1187
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # make sure the ignore matcher is rebuilt from scratch on each run
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        # the attribute access triggers (and therefore times) the parsing
        # of the ignore files and the construction of the matcher
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1204
1204
@command(b'perfindex', [
         (b'', b'rev', [], b'revision to be looked up (default tip)'),
         (b'', b'no-lookup', None, b'do not revision lookup post creation'),
         ] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matter. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than
    for `--rev 0`. The number of looked up revisions and their order can
    also matter.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # fix: after _byteskwargs() the option keys are bytes, so the
        # previous `opts['rev']` lookup raised KeyError on Python 3; the
        # Abort message is also made bytes for consistency with the file
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoids polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)
    timer(d, setup=setup)
    fm.end()
1258
1258
@command(b'perfnodemap', [
         (b'', b'rev', [], b'revision to be looked up (default tip)'),
         (b'', b'clear-caches', True, b'clear revlog cache between calls'),
         ] + formatteropts)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we
    look up can vary. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focuses on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # fix: after _byteskwargs() the option keys are bytes, so the previous
    # `opts['clear_caches']` lookup raised KeyError on Python 3
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoids polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]
    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        def setup():
            setnodeget()
    else:
        setnodeget()
        d() # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1317
1317
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup cost of running `hg version`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def run():
        if os.name == r'nt':
            # no /dev/null on Windows; neutralize HGRCPATH via the env
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
        else:
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
    timer(run)
    fm.end()
1331
1331
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object
    layers from the repository object. The first N revisions will be used
    for this benchmark. N is controlled by the ``perf.parentscount`` config
    option (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    # collect the nodes outside of the timed section
    nodes = [repo.changelog.node(i) for i in _xrange(count)]
    def run():
        for node in nodes:
            repo.changelog.parents(node)
    timer(run)
    fm.end()
1355
1355
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the list of files touched by one changeset"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo[rev].files()))
    fm.end()
1365
1365
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the file list straight from a changelog entry"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    # index 3 of the parsed changelog entry is the list of touched files
    timer(lambda: len(cl.read(rev)[3]))
    fm.end()
1376
1376
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def run():
        return len(repo.lookup(rev))
    timer(run)
    fm.end()
1383
1383
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
          ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark applying a long series of random edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: every run replays the exact same pseudo-random sequence
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        # replace a random range [a1, a2) of existing lines with a random
        # range [b1, b2) of new lines, tracking the resulting line count
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        # only the replay of the pre-computed edits is timed
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1417
1417
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # bind the function once so the attribute lookup stays out of the timing
    resolve = scmutil.revrange
    timer(lambda: len(resolve(repo, specs)))
    fm.end()
1425
1425
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark a cold node-to-revision lookup in the changelog index"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    # open a private revlog instance so clearing its caches cannot disturb
    # the caches of the repo object itself
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(n)
        # drop the caches so every iteration performs a cold lookup
        clearcaches(cl)
    timer(d)
    fm.end()
1439
1439
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running `hg log` with its output suppressed"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # swallow the log output; only the runtime is of interest
    ui.pushbuffer()
    def run():
        commands.log(ui, repo, rev=rev, date=b'', user=b'',
                     copies=opts.get(b'rename'))
    timer(run)
    ui.popbuffer()
    fm.end()
1453
1453
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def walkbackwards():
        for rev in repo.changelog.revs(start=len(repo) - 1, stop=-1):
            # read changelog data (in addition to the index)
            repo[rev].branch()
    timer(walkbackwards)
    fm.end()
1468
1468
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
          ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    # makelogtemplater is resolved at import time; it is None on Mercurial
    # versions predating its introduction (4.3)
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render into a throwaway ui so output cost does not hit the terminal
    # NOTE(review): the devnull handle is never closed — harmless for a
    # one-shot command, but worth confirming
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        # render (and flush) every requested revision with the template
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1502
1502
1503 @command(b'perfhelper-mergecopies', formatteropts +
1503 @command(b'perfhelper-mergecopies', formatteropts +
1504 [
1504 [
1505 (b'r', b'revs', [], b'restrict search to these revisions'),
1505 (b'r', b'revs', [], b'restrict search to these revisions'),
1506 (b'', b'timing', False, b'provides extra data (costly)'),
1506 (b'', b'timing', False, b'provides extra data (costly)'),
1507 ])
1507 ])
1508 def perfhelpermergecopies(ui, repo, revs=[], **opts):
1508 def perfhelpermergecopies(ui, repo, revs=[], **opts):
1509 """find statistics about potential parameters for `perfmergecopies`
1509 """find statistics about potential parameters for `perfmergecopies`
1510
1510
1511 This command find (base, p1, p2) triplet relevant for copytracing
1511 This command find (base, p1, p2) triplet relevant for copytracing
1512 benchmarking in the context of a merge. It reports values for some of the
1512 benchmarking in the context of a merge. It reports values for some of the
1513 parameters that impact merge copy tracing time during merge.
1513 parameters that impact merge copy tracing time during merge.
1514
1514
1515 If `--timing` is set, rename detection is run and the associated timing
1515 If `--timing` is set, rename detection is run and the associated timing
1516 will be reported. The extra details come at the cost of slower command
1516 will be reported. The extra details come at the cost of slower command
1517 execution.
1517 execution.
1518
1518
1519 Since rename detection is only run once, other factors might easily
1519 Since rename detection is only run once, other factors might easily
1520 affect the precision of the timing. However it should give a good
1520 affect the precision of the timing. However it should give a good
1521 approximation of which revision triplets are very costly.
1521 approximation of which revision triplets are very costly.
1522 """
1522 """
1523 opts = _byteskwargs(opts)
1523 opts = _byteskwargs(opts)
1524 fm = ui.formatter(b'perf', opts)
1524 fm = ui.formatter(b'perf', opts)
1525 dotiming = opts[b'timing']
1525 dotiming = opts[b'timing']
1526
1526
1527 output_template = [
1527 output_template = [
1528 ("base", "%(base)12s"),
1528 ("base", "%(base)12s"),
1529 ("p1", "%(p1.node)12s"),
1529 ("p1", "%(p1.node)12s"),
1530 ("p2", "%(p2.node)12s"),
1530 ("p2", "%(p2.node)12s"),
1531 ("p1.nb-revs", "%(p1.nbrevs)12d"),
1531 ("p1.nb-revs", "%(p1.nbrevs)12d"),
1532 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
1532 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
1533 ("p1.renames", "%(p1.renamedfiles)12d"),
1533 ("p1.renames", "%(p1.renamedfiles)12d"),
1534 ("p1.time", "%(p1.time)12.3f"),
1534 ("p1.time", "%(p1.time)12.3f"),
1535 ("p2.nb-revs", "%(p2.nbrevs)12d"),
1535 ("p2.nb-revs", "%(p2.nbrevs)12d"),
1536 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
1536 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
1537 ("p2.renames", "%(p2.renamedfiles)12d"),
1537 ("p2.renames", "%(p2.renamedfiles)12d"),
1538 ("p2.time", "%(p2.time)12.3f"),
1538 ("p2.time", "%(p2.time)12.3f"),
1539 ("renames", "%(nbrenamedfiles)12d"),
1539 ("renames", "%(nbrenamedfiles)12d"),
1540 ("total.time", "%(time)12.3f"),
1540 ("total.time", "%(time)12.3f"),
1541 ]
1541 ]
1542 if not dotiming:
1542 if not dotiming:
1543 output_template = [i for i in output_template
1543 output_template = [i for i in output_template
1544 if not ('time' in i[0] or 'renames' in i[0])]
1544 if not ('time' in i[0] or 'renames' in i[0])]
1545 header_names = [h for (h, v) in output_template]
1545 header_names = [h for (h, v) in output_template]
1546 output = ' '.join([v for (h, v) in output_template]) + '\n'
1546 output = ' '.join([v for (h, v) in output_template]) + '\n'
1547 header = ' '.join(['%12s'] * len(header_names)) + '\n'
1547 header = ' '.join(['%12s'] * len(header_names)) + '\n'
1548 fm.plain(header % tuple(header_names))
1548 fm.plain(header % tuple(header_names))
1549
1549
1550 if not revs:
1550 if not revs:
1551 revs = ['all()']
1551 revs = ['all()']
1552 revs = scmutil.revrange(repo, revs)
1552 revs = scmutil.revrange(repo, revs)
1553
1553
1554 roi = repo.revs('merge() and %ld', revs)
1554 roi = repo.revs('merge() and %ld', revs)
1555 for r in roi:
1555 for r in roi:
1556 ctx = repo[r]
1556 ctx = repo[r]
1557 p1 = ctx.p1()
1557 p1 = ctx.p1()
1558 p2 = ctx.p2()
1558 p2 = ctx.p2()
1559 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
1559 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
1560 for b in bases:
1560 for b in bases:
1561 b = repo[b]
1561 b = repo[b]
1562 p1missing = copies._computeforwardmissing(b, p1)
1562 p1missing = copies._computeforwardmissing(b, p1)
1563 p2missing = copies._computeforwardmissing(b, p2)
1563 p2missing = copies._computeforwardmissing(b, p2)
1564 data = {
1564 data = {
1565 b'base': b.hex(),
1565 b'base': b.hex(),
1566 b'p1.node': p1.hex(),
1566 b'p1.node': p1.hex(),
1567 b'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())),
1567 b'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())),
1568 b'p1.nbmissingfiles': len(p1missing),
1568 b'p1.nbmissingfiles': len(p1missing),
1569 b'p2.node': p2.hex(),
1569 b'p2.node': p2.hex(),
1570 b'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())),
1570 b'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())),
1571 b'p2.nbmissingfiles': len(p2missing),
1571 b'p2.nbmissingfiles': len(p2missing),
1572 }
1572 }
1573 if dotiming:
1573 if dotiming:
1574 begin = util.timer()
1574 begin = util.timer()
1575 mergedata = copies.mergecopies(repo, p1, p2, b)
1575 mergedata = copies.mergecopies(repo, p1, p2, b)
1576 end = util.timer()
1576 end = util.timer()
1577 # not very stable timing since we did only one run
1577 # not very stable timing since we did only one run
1578 data['time'] = end - begin
1578 data['time'] = end - begin
1579 # mergedata contains five dicts: "copy", "movewithdir",
1579 # mergedata contains five dicts: "copy", "movewithdir",
1580 # "diverge", "renamedelete" and "dirmove".
1580 # "diverge", "renamedelete" and "dirmove".
1581 # The first 4 are about renamed file so lets count that.
1581 # The first 4 are about renamed file so lets count that.
1582 renames = len(mergedata[0])
1582 renames = len(mergedata[0])
1583 renames += len(mergedata[1])
1583 renames += len(mergedata[1])
1584 renames += len(mergedata[2])
1584 renames += len(mergedata[2])
1585 renames += len(mergedata[3])
1585 renames += len(mergedata[3])
1586 data['nbrenamedfiles'] = renames
1586 data['nbrenamedfiles'] = renames
1587 begin = util.timer()
1587 begin = util.timer()
1588 p1renames = copies.pathcopies(b, p1)
1588 p1renames = copies.pathcopies(b, p1)
1589 end = util.timer()
1589 end = util.timer()
1590 data['p1.time'] = end - begin
1590 data['p1.time'] = end - begin
1591 begin = util.timer()
1591 begin = util.timer()
1592 p2renames = copies.pathcopies(b, p2)
1592 p2renames = copies.pathcopies(b, p2)
1593 data['p2.time'] = end - begin
1593 data['p2.time'] = end - begin
1594 end = util.timer()
1594 end = util.timer()
1595 data['p1.renamedfiles'] = len(p1renames)
1595 data['p1.renamedfiles'] = len(p1renames)
1596 data['p2.renamedfiles'] = len(p2renames)
1596 data['p2.renamedfiles'] = len(p2renames)
1597 fm.startitem()
1597 fm.startitem()
1598 fm.data(**data)
1598 fm.data(**data)
1599 # make node pretty for the human output
1599 # make node pretty for the human output
1600 out = data.copy()
1600 out = data.copy()
1601 out['base'] = fm.hexfunc(b.node())
1601 out['base'] = fm.hexfunc(b.node())
1602 out['p1.node'] = fm.hexfunc(p1.node())
1602 out['p1.node'] = fm.hexfunc(p1.node())
1603 out['p2.node'] = fm.hexfunc(p2.node())
1603 out['p2.node'] = fm.hexfunc(p2.node())
1604 fm.plain(output % out)
1604 fm.plain(output % out)
1605
1605
1606 fm.end()
1606 fm.end()
1607
1607
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
         ])
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    # Two output layouts: the timing columns are only present (and only
    # computed) when --timing is given, because pathcopies() is costly.
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # revisions of interest: only merges exercise copy tracing between a
    # common ancestor and each parent.
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # no candidate files means copy tracing has nothing to do
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                # make nodes pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
1683
1683
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor for the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # abort=False: we only want construction cost, not collision reporting
    timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
    fm.end()
1690
1690
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store

    def d():
        # re-parse the on-disk fncache each run
        s.fncache._load()

    timer(d)
    fm.end()
1700
1700
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache back to disk inside a transaction"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    # Ensure the lock is always released, even if the benchmark (or the
    # transaction setup) raises; leaking a repo lock would wedge the repo.
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        tr.addbackup(b'fncache')

        def d():
            # mark dirty so write() actually rewrites the file every run
            s.fncache._dirty = True
            s.fncache.write(tr)

        timer(d)
        tr.close()
    finally:
        lock.release()
    fm.end()
1717
1717
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark store-path encoding of every entry in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    # load once, outside the timed section: we only measure encoding
    s.fncache._load()

    def d():
        for p in s.fncache.entries:
            s.encode(p)

    timer(d)
    fm.end()
1729
1729
def _bdiffworker(q, blocks, xdiff, ready, done):
    """Worker loop for the threaded variant of ``perfbdiff``.

    Pulls text pairs from queue *q* and diffs them with the algorithm
    selected by the *xdiff*/*blocks* flags.  A ``None`` item marks the end
    of a batch: the worker then waits on the *ready* condition until the
    driver wakes it for the next timed run.  The loop exits once the *done*
    event is set (see the teardown in ``perfbdiff``).
    """
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            # park until the driver notifies for the next run (or shutdown)
            ready.wait()
1745
1745
def _manifestrevision(repo, mnode):
    """Return the raw manifest revision text for node *mnode*.

    Works across Mercurial versions: newer manifestlogs expose
    ``getstorage()``, older ones expose the revlog directly via ``_revlog``.
    """
    ml = repo.manifestlog

    # NOTE(review): a bytes attribute name is handed to safehasattr here;
    # on Python 3 getattr() wants str — presumably safehasattr copes, but
    # confirm against this Mercurial version's util module.
    if util.safehasattr(ml, b'getstorage'):
        store = ml.getstorage(b'')
    else:
        store = ml._revlog

    return store.revision(mnode)
1755
1755
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # changelog/manifest mode: the positional arg is the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # Collect every (old, new) text pair up front so the timed section
    # measures only the diffing itself.
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        # Threaded mode: start the workers and run one warm-up batch of
        # None sentinels so thread startup cost is excluded from the timing.
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()

        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)  # one batch-end marker per worker
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # Tear down: flag shutdown, unblock any worker waiting on the queue
        # or the condition so every thread can exit its loop.
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1856
1856
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # changelog/manifest mode: the positional arg is the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # Gather all (old, new) text pairs before timing, so only the diffing
    # is measured.
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1922
1922
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # short flag -> keyword argument understood by commands.diff
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        # use a distinct name so we don't clobber the command's own `opts`
        diffargs = dict((options[c], b'1') for c in diffopt)

        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **diffargs)
            ui.popbuffer()

        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()
1944
1944
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # first 4 bytes of the index: flags (high 16 bits) + version (low 16)
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    # sample nodes at fixed fractions of the revlog for the lookup benches
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2062
2062
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # A negative start revision counts back from the end of the revlog.
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        # Drop cached data so every pass measures cold reads.
        rl.clearcaches()

        step = opts[b'dist']
        if reverse:
            first, last, step = rllen - 1, startrev - 1, -step
        else:
            first, last = startrev, rllen

        for rev in _xrange(first, last, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(rev))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2104
2104
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
         ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # Negative revisions count back from the end of the revlog.
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # fixed typo: message previously read 'invalide run count'
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    # each entry becomes (rev, [timing-from-run-1, timing-from-run-2, ...])
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # BUG FIX: the median entry previously used ``resultcount * 70 // 100``,
        # reporting the 70th percentile under the "50%" label.
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
        )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
2222
2222
2223 class _faketr(object):
2223 class _faketr(object):
2224 def add(s, x, y, z=None):
2224 def add(s, x, y, z=None):
2225 return None
2225 return None
2226
2226
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    """Replay revisions of ``orig`` into a temporary revlog, timing each add.

    Returns a list of ``(rev, timing)`` pairs, one per replayed revision.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            updateprogress = progress.update
            completeprogress = progress.complete
        else:
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # measure a cold write every time
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
2263
2263
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``(args, kwargs)`` pair for ``addrawrevision()`` of ``rev``.

    ``source`` selects how the revision data is fed back: a full text, a
    delta against one of the parents, or the delta already stored in
    ``orig`` (see ``perfrevlogwrite`` for the list of valid values).
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second one
        parent = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(parent), orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        parent, diff = p1, orig.revdiff(p1, rev)
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(diff) > len(p2diff):
                parent, diff = p2, p2diff
        cachedelta = (orig.rev(parent), diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2302
2302
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a writable temporary copy of ``orig`` truncated at ``truncaterev``.

    The copy lives in its own temporary directory, which is removed when the
    context exits.  Inline revlogs are not supported.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    # forward the option only on versions that know about it
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, indexname)
        destdatapath = os.path.join(tmpdir, dataname)
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        for path, newsize in [(destindexpath, truncaterev * orig._io.size),
                              (destdatapath, orig.start(truncaterev))]:
            with open(path, 'ab') as fp:
                fp.seek(0)
                fp.truncate(newsize)

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname, **revlogkwargs)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
2353
2353
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    segmentforrevs = getattr(rl, '_getsegmentforrevs', None)
    if segmentforrevs is None:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # default to every available engine able to compress revlog data
        engines = []
        for name in util.compengines:
            engine = util.compengines[name]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(name)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # an inline revlog keeps its data inside the index file
        return getsvfs(repo)(rl.indexfile if rl._inline else rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        # Swap in the requested compression engine.
        oldcompressor = rl._compressor
        rl._compressor = compressor
        try:
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (doread, b'read'),
        (doreadcachedfh, b'read w/ reused fd'),
        (doreadbatch, b'read batch'),
        (doreadbatchcachedfh, b'read batch w/ reused fd'),
        (dochunk, b'chunk'),
        (dochunkbatch, b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2471
2471
2472 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2472 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2473 [(b'', b'cache', False, b'use caches instead of clearing')],
2473 [(b'', b'cache', False, b'use caches instead of clearing')],
2474 b'-c|-m|FILE REV')
2474 b'-c|-m|FILE REV')
2475 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2475 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2476 """Benchmark obtaining a revlog revision.
2476 """Benchmark obtaining a revlog revision.
2477
2477
2478 Obtaining a revlog revision consists of roughly the following steps:
2478 Obtaining a revlog revision consists of roughly the following steps:
2479
2479
2480 1. Compute the delta chain
2480 1. Compute the delta chain
2481 2. Slice the delta chain if applicable
2481 2. Slice the delta chain if applicable
2482 3. Obtain the raw chunks for that delta chain
2482 3. Obtain the raw chunks for that delta chain
2483 4. Decompress each raw chunk
2483 4. Decompress each raw chunk
2484 5. Apply binary patches to obtain fulltext
2484 5. Apply binary patches to obtain fulltext
2485 6. Verify hash of fulltext
2485 6. Verify hash of fulltext
2486
2486
2487 This command measures the time spent in each of these phases.
2487 This command measures the time spent in each of these phases.
2488 """
2488 """
2489 opts = _byteskwargs(opts)
2489 opts = _byteskwargs(opts)
2490
2490
2491 if opts.get(b'changelog') or opts.get(b'manifest'):
2491 if opts.get(b'changelog') or opts.get(b'manifest'):
2492 file_, rev = None, file_
2492 file_, rev = None, file_
2493 elif rev is None:
2493 elif rev is None:
2494 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2494 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2495
2495
2496 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2496 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2497
2497
2498 # _chunkraw was renamed to _getsegmentforrevs.
2498 # _chunkraw was renamed to _getsegmentforrevs.
2499 try:
2499 try:
2500 segmentforrevs = r._getsegmentforrevs
2500 segmentforrevs = r._getsegmentforrevs
2501 except AttributeError:
2501 except AttributeError:
2502 segmentforrevs = r._chunkraw
2502 segmentforrevs = r._chunkraw
2503
2503
2504 node = r.lookup(rev)
2504 node = r.lookup(rev)
2505 rev = r.rev(node)
2505 rev = r.rev(node)
2506
2506
2507 def getrawchunks(data, chain):
2507 def getrawchunks(data, chain):
2508 start = r.start
2508 start = r.start
2509 length = r.length
2509 length = r.length
2510 inline = r._inline
2510 inline = r._inline
2511 iosize = r._io.size
2511 iosize = r._io.size
2512 buffer = util.buffer
2512 buffer = util.buffer
2513
2513
2514 chunks = []
2514 chunks = []
2515 ladd = chunks.append
2515 ladd = chunks.append
2516 for idx, item in enumerate(chain):
2516 for idx, item in enumerate(chain):
2517 offset = start(item[0])
2517 offset = start(item[0])
2518 bits = data[idx]
2518 bits = data[idx]
2519 for rev in item:
2519 for rev in item:
2520 chunkstart = start(rev)
2520 chunkstart = start(rev)
2521 if inline:
2521 if inline:
2522 chunkstart += (rev + 1) * iosize
2522 chunkstart += (rev + 1) * iosize
2523 chunklength = length(rev)
2523 chunklength = length(rev)
2524 ladd(buffer(bits, chunkstart - offset, chunklength))
2524 ladd(buffer(bits, chunkstart - offset, chunklength))
2525
2525
2526 return chunks
2526 return chunks
2527
2527
2528 def dodeltachain(rev):
2528 def dodeltachain(rev):
2529 if not cache:
2529 if not cache:
2530 r.clearcaches()
2530 r.clearcaches()
2531 r._deltachain(rev)
2531 r._deltachain(rev)
2532
2532
2533 def doread(chain):
2533 def doread(chain):
2534 if not cache:
2534 if not cache:
2535 r.clearcaches()
2535 r.clearcaches()
2536 for item in slicedchain:
2536 for item in slicedchain:
2537 segmentforrevs(item[0], item[-1])
2537 segmentforrevs(item[0], item[-1])
2538
2538
2539 def doslice(r, chain, size):
2539 def doslice(r, chain, size):
2540 for s in slicechunk(r, chain, targetsize=size):
2540 for s in slicechunk(r, chain, targetsize=size):
2541 pass
2541 pass
2542
2542
2543 def dorawchunks(data, chain):
2543 def dorawchunks(data, chain):
2544 if not cache:
2544 if not cache:
2545 r.clearcaches()
2545 r.clearcaches()
2546 getrawchunks(data, chain)
2546 getrawchunks(data, chain)
2547
2547
2548 def dodecompress(chunks):
2548 def dodecompress(chunks):
2549 decomp = r.decompress
2549 decomp = r.decompress
2550 for chunk in chunks:
2550 for chunk in chunks:
2551 decomp(chunk)
2551 decomp(chunk)
2552
2552
2553 def dopatch(text, bins):
2553 def dopatch(text, bins):
2554 if not cache:
2554 if not cache:
2555 r.clearcaches()
2555 r.clearcaches()
2556 mdiff.patches(text, bins)
2556 mdiff.patches(text, bins)
2557
2557
2558 def dohash(text):
2558 def dohash(text):
2559 if not cache:
2559 if not cache:
2560 r.clearcaches()
2560 r.clearcaches()
2561 r.checkhash(text, node, rev=rev)
2561 r.checkhash(text, node, rev=rev)
2562
2562
2563 def dorevision():
2563 def dorevision():
2564 if not cache:
2564 if not cache:
2565 r.clearcaches()
2565 r.clearcaches()
2566 r.revision(node)
2566 r.revision(node)
2567
2567
2568 try:
2568 try:
2569 from mercurial.revlogutils.deltas import slicechunk
2569 from mercurial.revlogutils.deltas import slicechunk
2570 except ImportError:
2570 except ImportError:
2571 slicechunk = getattr(revlog, '_slicechunk', None)
2571 slicechunk = getattr(revlog, '_slicechunk', None)
2572
2572
2573 size = r.length(rev)
2573 size = r.length(rev)
2574 chain = r._deltachain(rev)[0]
2574 chain = r._deltachain(rev)[0]
2575 if not getattr(r, '_withsparseread', False):
2575 if not getattr(r, '_withsparseread', False):
2576 slicedchain = (chain,)
2576 slicedchain = (chain,)
2577 else:
2577 else:
2578 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2578 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2579 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2579 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2580 rawchunks = getrawchunks(data, slicedchain)
2580 rawchunks = getrawchunks(data, slicedchain)
2581 bins = r._chunks(chain)
2581 bins = r._chunks(chain)
2582 text = bytes(bins[0])
2582 text = bytes(bins[0])
2583 bins = bins[1:]
2583 bins = bins[1:]
2584 text = mdiff.patches(text, bins)
2584 text = mdiff.patches(text, bins)
2585
2585
2586 benches = [
2586 benches = [
2587 (lambda: dorevision(), b'full'),
2587 (lambda: dorevision(), b'full'),
2588 (lambda: dodeltachain(rev), b'deltachain'),
2588 (lambda: dodeltachain(rev), b'deltachain'),
2589 (lambda: doread(chain), b'read'),
2589 (lambda: doread(chain), b'read'),
2590 ]
2590 ]
2591
2591
2592 if getattr(r, '_withsparseread', False):
2592 if getattr(r, '_withsparseread', False):
2593 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2593 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2594 benches.append(slicing)
2594 benches.append(slicing)
2595
2595
2596 benches.extend([
2596 benches.extend([
2597 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2597 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2598 (lambda: dodecompress(rawchunks), b'decompress'),
2598 (lambda: dodecompress(rawchunks), b'decompress'),
2599 (lambda: dopatch(text, bins), b'patch'),
2599 (lambda: dopatch(text, bins), b'patch'),
2600 (lambda: dohash(text), b'hash'),
2600 (lambda: dohash(text), b'hash'),
2601 ])
2601 ])
2602
2602
2603 timer, fm = gettimer(ui, opts)
2603 timer, fm = gettimer(ui, opts)
2604 for fn, title in benches:
2604 for fn, title in benches:
2605 timer(fn, title=title)
2605 timer(fn, title=title)
2606 fm.end()
2606 fm.end()
2607
2607
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building the
    volatile revision set caches on the revset execution. Volatile caches hold
    filtered and obsolete related data."""
    # NOTE: the help text previously said "--clean", but the option declared
    # above is `-C/--clear`; the docstring now matches the real flag.
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def d():
        if clear:
            # drop the volatile (filtered/obsolete) caches so each run pays
            # the cache rebuild cost as well
            repo.invalidatevolatilesets()
        if contexts:
            # materialize a full changectx per revision (heavier path)
            for ctx in repo.set(expr): pass
        else:
            # iterate plain revision numbers only
            for r in repo.revs(expr): pass
    timer(d)
    fm.end()
2630
2630
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def makebench(compute, setname):
        # Build a timed closure that recomputes the given volatile set from
        # scratch on every invocation.
        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            compute(repo, setname)
        return run

    # obsolescence-related sets first, in sorted order
    obsnames = sorted(obsolete.cachefuncs)
    if names:
        obsnames = [n for n in obsnames if n in names]
    for setname in obsnames:
        timer(makebench(obsolete.getrevs, setname), title=setname)

    # then the repoview filter sets, also sorted
    filtnames = sorted(repoview.filtertable)
    if names:
        filtnames = [n for n in filtnames if n in names]
    for setname in filtnames:
        timer(makebench(repoview.filterrevs, setname), title=setname)
    fm.end()
2672
2672
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
          ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        # newer versions keep per-filter caches behind `_per_filter`
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap so the full chain is rebuilt
                view._branchcaches.clear()
            else:
                # drop only this filter's entry; subset caches stay warm
                filtered.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset is not itself still pending, so that
        # smaller subsets are ordered before the filters built on top of them
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # stub out on-disk branchmap read/write so only the in-memory update
    # cost is measured; restored in the `finally` below
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2751
2751
@command(b'perfbranchmapupdate', [
    (b'', b'base', [], b'subset of revision to start from'),
    (b'', b'target', [], b'subset of revision to end with'),
    (b'', b'clear-caches', False, b'clear cache between each runs')
    ] + formatteropts)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None] # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the revisions the benchmarked update() call will have to process
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # register temporary repoview filters exposing the base and target
        # revision sets as filtered repos; removed again in `finally`
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                # bring the candidate up to the exact base state before
                # using it as the benchmark starting point
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset where found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # each run starts from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2855
2855
@command(b'perfbranchmapload', [
    (b'f', b'filter', b'', b'Specify repoview filter'),
    # fixed user-visible typo: "brachmap" -> "branchmap"
    (b'', b'list', False, b'List branchmap filter caches'),
    (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),

    ] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # --list: just report which branchmap cache files exist and how big
        # they are, then exit without benchmarking
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap() # make sure we have a relevant, up to date branchmap

    # resolve the on-disk reader; API moved to a classmethod in newer versions
    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached; walk up the
    # subset chain until a cached branchmap is found
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        fromfile(repo)
    timer(bench, setup=setup)
    fm.end()
2906
2906
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)

    def countmarkers():
        # instantiating obsstore parses every on-disk marker; len() is the
        # marker count reported as the benchmark result
        return len(obsolete.obsstore(svfs))

    timer(countmarkers)
    fm.end()
2916
2916
@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
     (b'', b'mincost', 0, b'smallest cost of items in cache'),
     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
     (b'', b'size', 4, b'size of cache'),
     (b'', b'gets', 10000, b'number of key lookups'),
     (b'', b'sets', 10000, b'number of key sets'),
     (b'', b'mixed', 10000, b'number of mixed mode operations'),
     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    """benchmark `util.lrucachedict` under several access patterns

    Runs init/get/insert/set/mixed micro-benchmarks; when --costlimit is
    non-zero the cost-aware variants are exercised instead."""
    opts = _byteskwargs(opts)

    def doinit():
        # raw construction cost of the cache object itself
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        # cost-aware variant; `costs` is populated below, before the timers
        # run (assumes sets >= size so costs[i] exists for every value --
        # holds for the defaults; TODO confirm for extreme option values)
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        # like doinserts but through __setitem__ instead of insert()
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        # op 0 = get, op 1 = set; ratio controlled by --mixedgetfreq
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # pick the cost-aware or plain benchmark set depending on --costlimit
    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3047
3047
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def bench():
        # emit the same line 100k times; hoist the literal and iteration
        # count so the loop body is a bare ui.write() call
        line = b'Testing write performance\n'
        for _ in range(100000):
            ui.write(line)

    timer(bench)
    fm.end()
3060
3060
def uisetup(ui):
    """extension setup hook

    Adapts the extension to very old Mercurial versions by wrapping
    cmdutil.openrevlog so that the unsupported '--dir' option aborts with a
    clear message instead of failing obscurely."""
    if (util.safehasattr(cmdutil, b'openrevlog') and
        not util.safehasattr(commands, b'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(b"This version doesn't support --dir option",
                                  hint=b"use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3075
3075
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        # Drive a progress bar through every step from 0 to total; the
        # context manager tears the bar down when the closure returns.
        with ui.makeprogress(topic, total=total) as progress:
            for _ in pycompat.xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now