##// END OF EJS Templates
perf: don't depend on pycompat for older Mercurial versions...
Martin von Zweigbergk -
r43053:c0000597 default
parent child Browse files
Show More
@@ -1,3092 +1,3094 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of second to wait before any group of runs (default: 1)
16 number of second to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of run to perform before starting measurement.
19 number of run to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 from __future__ import absolute_import
57 from __future__ import absolute_import
58 import contextlib
58 import contextlib
59 import functools
59 import functools
60 import gc
60 import gc
61 import os
61 import os
62 import random
62 import random
63 import shutil
63 import shutil
64 import struct
64 import struct
65 import sys
65 import sys
66 import tempfile
66 import tempfile
67 import threading
67 import threading
68 import time
68 import time
69 from mercurial import (
69 from mercurial import (
70 changegroup,
70 changegroup,
71 cmdutil,
71 cmdutil,
72 commands,
72 commands,
73 copies,
73 copies,
74 error,
74 error,
75 extensions,
75 extensions,
76 hg,
76 hg,
77 mdiff,
77 mdiff,
78 merge,
78 merge,
79 revlog,
79 revlog,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 dir(registrar) # forcibly load it
96 dir(registrar) # forcibly load it
97 except ImportError:
97 except ImportError:
98 registrar = None
98 registrar = None
99 try:
99 try:
100 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
100 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 except ImportError:
101 except ImportError:
102 pass
102 pass
103 try:
103 try:
104 from mercurial.utils import repoviewutil # since 5.0
104 from mercurial.utils import repoviewutil # since 5.0
105 except ImportError:
105 except ImportError:
106 repoviewutil = None
106 repoviewutil = None
107 try:
107 try:
108 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
108 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 except ImportError:
109 except ImportError:
110 pass
110 pass
111 try:
111 try:
112 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
112 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 except ImportError:
113 except ImportError:
114 pass
114 pass
115
115
116 try:
116 try:
117 from mercurial import profiling
117 from mercurial import profiling
118 except ImportError:
118 except ImportError:
119 profiling = None
119 profiling = None
120
120
def identity(a):
    """Return ``a`` unchanged.

    Used as a no-op stand-in for pycompat helpers (e.g. ``byteskwargs``,
    ``fsencode``) on Mercurial versions that lack them.
    """
    return a
123
123
124 try:
124 try:
125 from mercurial import pycompat
125 from mercurial import pycompat
126 getargspec = pycompat.getargspec # added to module after 4.5
126 getargspec = pycompat.getargspec # added to module after 4.5
127 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
127 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
128 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
128 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
129 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
129 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
130 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
130 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
131 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
131 if pycompat.ispy3:
132 if pycompat.ispy3:
132 _maxint = sys.maxsize # per py3 docs for replacing maxint
133 _maxint = sys.maxsize # per py3 docs for replacing maxint
133 else:
134 else:
134 _maxint = sys.maxint
135 _maxint = sys.maxint
135 except (NameError, ImportError, AttributeError):
136 except (NameError, ImportError, AttributeError):
136 import inspect
137 import inspect
137 getargspec = inspect.getargspec
138 getargspec = inspect.getargspec
138 _byteskwargs = identity
139 _byteskwargs = identity
140 _bytestr = str
139 fsencode = identity # no py3 support
141 fsencode = identity # no py3 support
140 _maxint = sys.maxint # no py3 support
142 _maxint = sys.maxint # no py3 support
141 _sysstr = lambda x: x # no py3 support
143 _sysstr = lambda x: x # no py3 support
142 _xrange = xrange
144 _xrange = xrange
143
145
144 try:
146 try:
145 # 4.7+
147 # 4.7+
146 queue = pycompat.queue.Queue
148 queue = pycompat.queue.Queue
147 except (NameError, AttributeError, ImportError):
149 except (NameError, AttributeError, ImportError):
148 # <4.7.
150 # <4.7.
149 try:
151 try:
150 queue = pycompat.queue
152 queue = pycompat.queue
151 except (NameError, AttributeError, ImportError):
153 except (NameError, AttributeError, ImportError):
152 import Queue as queue
154 import Queue as queue
153
155
154 try:
156 try:
155 from mercurial import logcmdutil
157 from mercurial import logcmdutil
156 makelogtemplater = logcmdutil.maketemplater
158 makelogtemplater = logcmdutil.maketemplater
157 except (AttributeError, ImportError):
159 except (AttributeError, ImportError):
158 try:
160 try:
159 makelogtemplater = cmdutil.makelogtemplater
161 makelogtemplater = cmdutil.makelogtemplater
160 except (AttributeError, ImportError):
162 except (AttributeError, ImportError):
161 makelogtemplater = None
163 makelogtemplater = None
162
164
163 # for "historical portability":
165 # for "historical portability":
164 # define util.safehasattr forcibly, because util.safehasattr has been
166 # define util.safehasattr forcibly, because util.safehasattr has been
165 # available since 1.9.3 (or 94b200a11cf7)
167 # available since 1.9.3 (or 94b200a11cf7)
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # sentinel: distinguishes "attribute missing" from any real value

def safehasattr(thing, attr):
    """Return True if ``thing`` has attribute ``attr`` (given as bytes)."""
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined

setattr(util, 'safehasattr', safehasattr)
170
172
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    # NOTE(review): os.name is a native str, so this bytes comparison can
    # only ever match on Python 2 — confirm that is intentional
    util.timer = time.clock
else:
    util.timer = time.time
180
182
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
                         (b'c', b'changelog', False, (b'open changelog')),
                         (b'm', b'manifest', False, (b'open manifest')),
                         (b'', b'dir', False, (b'open directory manifest')),
                     ]))

# command table populated by the @command decorator below
cmdtable = {}
202
204
# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a command spec like ``b"name|alias1|alias2"`` into its names."""
    return cmd.split(b"|")
208
210
# Pick the most capable @command decorator this Mercurial provides.
if safehasattr(registrar, 'command'):
    # modern path: registrar.command, since 3.7 (or 37d50250b696)
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
236
238
# Register the perf.* config items when this Mercurial supports config
# registration; silently skip on versions that predate registrar.configitem.
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
               experimental=True,
               )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
               experimental=True,
               )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
               experimental=True,
               )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
               experimental=True,
               )
    configitem(b'perf', b'pre-run',
               default=mercurial.configitems.dynamicdefault,
               )
    configitem(b'perf', b'profile-benchmark',
               default=mercurial.configitems.dynamicdefault,
               )
    configitem(b'perf', b'run-limits',
               default=mercurial.configitems.dynamicdefault,
               experimental=True,
               )
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    # (older configitem() rejects the "experimental" keyword)
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
               )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
               )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
               )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
               )
    configitem(b'perf', b'pre-run',
               default=mercurial.configitems.dynamicdefault,
               )
    configitem(b'perf', b'profile-benchmark',
               default=mercurial.configitems.dynamicdefault,
               )
    configitem(b'perf', b'run-limits',
               default=mercurial.configitems.dynamicdefault,
               )
294
296
def getlen(ui):
    """Return a length function honoring the perf.stub testing mode.

    With ``perf.stub`` set, every collection is reported as length 1 so
    benchmark loops finish immediately in the test suite; otherwise the
    builtin ``len`` is returned.
    """
    stub = ui.configbool(b"perf", b"stub", False)
    return (lambda x: 1) if stub else len
299
301
class noop(object):
    """dummy context manager"""

    def __enter__(self):
        # nothing to acquire
        pass

    def __exit__(self, *args):
        # returns None, so exceptions propagate normally
        pass

# shared do-nothing context, used where a profiler is optional
NOOPCTX = noop()
308
310
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n'
                     % item))
            continue
        try:
            # use the compat aliases (_sysstr/_bytestr), not pycompat
            # directly, so this keeps loading on pre-pycompat Mercurial
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (_bytestr(e), item)))
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (_bytestr(e), item)))
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
                          prerun=prerun, profiler=profiler)
    return t, fm
408
410
def stub_timer(fm, func, setup=None, title=None):
    """Benchmark replacement for perf.stub mode: run ``func`` exactly once.

    ``setup`` (if given) runs first; no timing is measured or reported, and
    ``fm``/``title`` are accepted only to match the real timer's signature.
    """
    if setup is not None:
        setup()
    func()
413
415
@contextlib.contextmanager
def timeone():
    """Yield a list that receives one timing sample on exit.

    The sample appended is a ``(wallclock, user-cpu, system-cpu)`` tuple
    covering the ``with`` body.
    """
    sample = []
    os_before = os.times()
    clock_before = util.timer()
    yield sample
    clock_after = util.timer()
    os_after = os.times()
    sample.append((clock_after - clock_before,
                   os_after[0] - os_before[0],
                   os_after[1] - os_before[1]))
424
426
425
427
# Default stop conditions for benchmark runs, checked in order:
# stop once (elapsed time, minimal run count) are both reached.
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
431
433
def _timer(fm, func, setup=None, title=None, displayall=False,
           limits=DEFAULTLIMITS, prerun=0, profiler=None):
    """Repeatedly run ``func`` and report timings through formatter ``fm``.

    ``setup`` runs before every iteration (including the ``prerun`` warm-up
    runs, which are not measured). Only the first measured iteration runs
    under ``profiler``; iteration stops at the first ``(time, mincount)``
    pair in ``limits`` whose both thresholds are met.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # warm-up runs, excluded from measurement
    for _ in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # only profile the first measured iteration
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)
464
466
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timings through formatter ``fm``.

    ``timings`` is a list of ``(wall, user, sys)`` tuples. Only the best
    (smallest) run is reported unless ``displayall`` is set, in which case
    max, average and median are printed as well. Sorts ``timings`` in place.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        # non-best roles get a "role." field prefix so formatter fields
        # stay unique across the best/max/avg/median rows
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    display(b'best', timings[0])
    if displayall:
        display(b'max', timings[-1])
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        display(b'median', timings[len(timings) // 2])
496
498
497 # utilities for historical portability
499 # utilities for historical portability
498
500
def getint(ui, section, name, default):
    """Read config ``section.name`` as an int, with ``default`` when unset.

    Reimplements ui.configint for "historical portability": configint has
    only been available since 1.9 (or fa2b596db182). Raises
    ``error.ConfigError`` when the value is set but not an integer.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, raw))
510
512
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    # snapshot the current value so it can be restored later
    origvalue = getattr(obj, _sysstr(name))

    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
540
542
541 # utilities to examine each internal API changes
543 # utilities to examine each internal API changes
542
544
543 def getbranchmapsubsettable():
545 def getbranchmapsubsettable():
544 # for "historical portability":
546 # for "historical portability":
545 # subsettable is defined in:
547 # subsettable is defined in:
546 # - branchmap since 2.9 (or 175c6fd8cacc)
548 # - branchmap since 2.9 (or 175c6fd8cacc)
547 # - repoview since 2.5 (or 59a9f18d4587)
549 # - repoview since 2.5 (or 59a9f18d4587)
548 # - repoviewutil since 5.0
550 # - repoviewutil since 5.0
549 for mod in (branchmap, repoview, repoviewutil):
551 for mod in (branchmap, repoview, repoviewutil):
550 subsettable = getattr(mod, 'subsettable', None)
552 subsettable = getattr(mod, 'subsettable', None)
551 if subsettable:
553 if subsettable:
552 return subsettable
554 return subsettable
553
555
554 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
556 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
555 # branchmap and repoview modules exist, but subsettable attribute
557 # branchmap and repoview modules exist, but subsettable attribute
556 # doesn't)
558 # doesn't)
557 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
559 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
558 hint=b"use 2.5 or later")
560 hint=b"use 2.5 or later")
559
561
560 def getsvfs(repo):
562 def getsvfs(repo):
561 """Return appropriate object to access files under .hg/store
563 """Return appropriate object to access files under .hg/store
562 """
564 """
563 # for "historical portability":
565 # for "historical portability":
564 # repo.svfs has been available since 2.3 (or 7034365089bf)
566 # repo.svfs has been available since 2.3 (or 7034365089bf)
565 svfs = getattr(repo, 'svfs', None)
567 svfs = getattr(repo, 'svfs', None)
566 if svfs:
568 if svfs:
567 return svfs
569 return svfs
568 else:
570 else:
569 return getattr(repo, 'sopener')
571 return getattr(repo, 'sopener')
570
572
571 def getvfs(repo):
573 def getvfs(repo):
572 """Return appropriate object to access files under .hg
574 """Return appropriate object to access files under .hg
573 """
575 """
574 # for "historical portability":
576 # for "historical portability":
575 # repo.vfs has been available since 2.3 (or 7034365089bf)
577 # repo.vfs has been available since 2.3 (or 7034365089bf)
576 vfs = getattr(repo, 'vfs', None)
578 vfs = getattr(repo, 'vfs', None)
577 if vfs:
579 if vfs:
578 return vfs
580 return vfs
579 else:
581 else:
580 return getattr(repo, 'opener')
582 return getattr(repo, 'opener')
581
583
582 def repocleartagscachefunc(repo):
584 def repocleartagscachefunc(repo):
583 """Return the function to clear tags cache according to repo internal API
585 """Return the function to clear tags cache according to repo internal API
584 """
586 """
585 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
587 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
586 # in this case, setattr(repo, '_tagscache', None) or so isn't
588 # in this case, setattr(repo, '_tagscache', None) or so isn't
587 # correct way to clear tags cache, because existing code paths
589 # correct way to clear tags cache, because existing code paths
588 # expect _tagscache to be a structured object.
590 # expect _tagscache to be a structured object.
589 def clearcache():
591 def clearcache():
590 # _tagscache has been filteredpropertycache since 2.5 (or
592 # _tagscache has been filteredpropertycache since 2.5 (or
591 # 98c867ac1330), and delattr() can't work in such case
593 # 98c867ac1330), and delattr() can't work in such case
592 if b'_tagscache' in vars(repo):
594 if b'_tagscache' in vars(repo):
593 del repo.__dict__[b'_tagscache']
595 del repo.__dict__[b'_tagscache']
594 return clearcache
596 return clearcache
595
597
596 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
598 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
597 if repotags: # since 1.4 (or 5614a628d173)
599 if repotags: # since 1.4 (or 5614a628d173)
598 return lambda : repotags.set(None)
600 return lambda : repotags.set(None)
599
601
600 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
602 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
601 if repotagscache: # since 0.6 (or d7df759d0e97)
603 if repotagscache: # since 0.6 (or d7df759d0e97)
602 return lambda : repotagscache.set(None)
604 return lambda : repotagscache.set(None)
603
605
604 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
606 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
605 # this point, but it isn't so problematic, because:
607 # this point, but it isn't so problematic, because:
606 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
608 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
607 # in perftags() causes failure soon
609 # in perftags() causes failure soon
608 # - perf.py itself has been available since 1.1 (or eb240755386d)
610 # - perf.py itself has been available since 1.1 (or eb240755386d)
609 raise error.Abort((b"tags API of this hg command is unknown"))
611 raise error.Abort((b"tags API of this hg command is unknown"))
610
612
611 # utilities to clear cache
613 # utilities to clear cache
612
614
613 def clearfilecache(obj, attrname):
615 def clearfilecache(obj, attrname):
614 unfiltered = getattr(obj, 'unfiltered', None)
616 unfiltered = getattr(obj, 'unfiltered', None)
615 if unfiltered is not None:
617 if unfiltered is not None:
616 obj = obj.unfiltered()
618 obj = obj.unfiltered()
617 if attrname in vars(obj):
619 if attrname in vars(obj):
618 delattr(obj, attrname)
620 delattr(obj, attrname)
619 obj._filecache.pop(attrname, None)
621 obj._filecache.pop(attrname, None)
620
622
621 def clearchangelog(repo):
623 def clearchangelog(repo):
622 if repo is not repo.unfiltered():
624 if repo is not repo.unfiltered():
623 object.__setattr__(repo, r'_clcachekey', None)
625 object.__setattr__(repo, r'_clcachekey', None)
624 object.__setattr__(repo, r'_clcache', None)
626 object.__setattr__(repo, r'_clcache', None)
625 clearfilecache(repo.unfiltered(), 'changelog')
627 clearfilecache(repo.unfiltered(), 'changelog')
626
628
627 # perf commands
629 # perf commands
628
630
629 @command(b'perfwalk', formatteropts)
631 @command(b'perfwalk', formatteropts)
630 def perfwalk(ui, repo, *pats, **opts):
632 def perfwalk(ui, repo, *pats, **opts):
631 opts = _byteskwargs(opts)
633 opts = _byteskwargs(opts)
632 timer, fm = gettimer(ui, opts)
634 timer, fm = gettimer(ui, opts)
633 m = scmutil.match(repo[None], pats, {})
635 m = scmutil.match(repo[None], pats, {})
634 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
636 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
635 ignored=False))))
637 ignored=False))))
636 fm.end()
638 fm.end()
637
639
638 @command(b'perfannotate', formatteropts)
640 @command(b'perfannotate', formatteropts)
639 def perfannotate(ui, repo, f, **opts):
641 def perfannotate(ui, repo, f, **opts):
640 opts = _byteskwargs(opts)
642 opts = _byteskwargs(opts)
641 timer, fm = gettimer(ui, opts)
643 timer, fm = gettimer(ui, opts)
642 fc = repo[b'.'][f]
644 fc = repo[b'.'][f]
643 timer(lambda: len(fc.annotate(True)))
645 timer(lambda: len(fc.annotate(True)))
644 fm.end()
646 fm.end()
645
647
646 @command(b'perfstatus',
648 @command(b'perfstatus',
647 [(b'u', b'unknown', False,
649 [(b'u', b'unknown', False,
648 b'ask status to look for unknown files')] + formatteropts)
650 b'ask status to look for unknown files')] + formatteropts)
649 def perfstatus(ui, repo, **opts):
651 def perfstatus(ui, repo, **opts):
650 opts = _byteskwargs(opts)
652 opts = _byteskwargs(opts)
651 #m = match.always(repo.root, repo.getcwd())
653 #m = match.always(repo.root, repo.getcwd())
652 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
654 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
653 # False))))
655 # False))))
654 timer, fm = gettimer(ui, opts)
656 timer, fm = gettimer(ui, opts)
655 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
657 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
656 fm.end()
658 fm.end()
657
659
658 @command(b'perfaddremove', formatteropts)
660 @command(b'perfaddremove', formatteropts)
659 def perfaddremove(ui, repo, **opts):
661 def perfaddremove(ui, repo, **opts):
660 opts = _byteskwargs(opts)
662 opts = _byteskwargs(opts)
661 timer, fm = gettimer(ui, opts)
663 timer, fm = gettimer(ui, opts)
662 try:
664 try:
663 oldquiet = repo.ui.quiet
665 oldquiet = repo.ui.quiet
664 repo.ui.quiet = True
666 repo.ui.quiet = True
665 matcher = scmutil.match(repo[None])
667 matcher = scmutil.match(repo[None])
666 opts[b'dry_run'] = True
668 opts[b'dry_run'] = True
667 if b'uipathfn' in getargspec(scmutil.addremove).args:
669 if b'uipathfn' in getargspec(scmutil.addremove).args:
668 uipathfn = scmutil.getuipathfn(repo)
670 uipathfn = scmutil.getuipathfn(repo)
669 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
671 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
670 else:
672 else:
671 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
673 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
672 finally:
674 finally:
673 repo.ui.quiet = oldquiet
675 repo.ui.quiet = oldquiet
674 fm.end()
676 fm.end()
675
677
676 def clearcaches(cl):
678 def clearcaches(cl):
677 # behave somewhat consistently across internal API changes
679 # behave somewhat consistently across internal API changes
678 if util.safehasattr(cl, b'clearcaches'):
680 if util.safehasattr(cl, b'clearcaches'):
679 cl.clearcaches()
681 cl.clearcaches()
680 elif util.safehasattr(cl, b'_nodecache'):
682 elif util.safehasattr(cl, b'_nodecache'):
681 from mercurial.node import nullid, nullrev
683 from mercurial.node import nullid, nullrev
682 cl._nodecache = {nullid: nullrev}
684 cl._nodecache = {nullid: nullrev}
683 cl._nodepos = None
685 cl._nodepos = None
684
686
685 @command(b'perfheads', formatteropts)
687 @command(b'perfheads', formatteropts)
686 def perfheads(ui, repo, **opts):
688 def perfheads(ui, repo, **opts):
687 """benchmark the computation of a changelog heads"""
689 """benchmark the computation of a changelog heads"""
688 opts = _byteskwargs(opts)
690 opts = _byteskwargs(opts)
689 timer, fm = gettimer(ui, opts)
691 timer, fm = gettimer(ui, opts)
690 cl = repo.changelog
692 cl = repo.changelog
691 def s():
693 def s():
692 clearcaches(cl)
694 clearcaches(cl)
693 def d():
695 def d():
694 len(cl.headrevs())
696 len(cl.headrevs())
695 timer(d, setup=s)
697 timer(d, setup=s)
696 fm.end()
698 fm.end()
697
699
698 @command(b'perftags', formatteropts+
700 @command(b'perftags', formatteropts+
699 [
701 [
700 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
702 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
701 ])
703 ])
702 def perftags(ui, repo, **opts):
704 def perftags(ui, repo, **opts):
703 opts = _byteskwargs(opts)
705 opts = _byteskwargs(opts)
704 timer, fm = gettimer(ui, opts)
706 timer, fm = gettimer(ui, opts)
705 repocleartagscache = repocleartagscachefunc(repo)
707 repocleartagscache = repocleartagscachefunc(repo)
706 clearrevlogs = opts[b'clear_revlogs']
708 clearrevlogs = opts[b'clear_revlogs']
707 def s():
709 def s():
708 if clearrevlogs:
710 if clearrevlogs:
709 clearchangelog(repo)
711 clearchangelog(repo)
710 clearfilecache(repo.unfiltered(), 'manifest')
712 clearfilecache(repo.unfiltered(), 'manifest')
711 repocleartagscache()
713 repocleartagscache()
712 def t():
714 def t():
713 return len(repo.tags())
715 return len(repo.tags())
714 timer(t, setup=s)
716 timer(t, setup=s)
715 fm.end()
717 fm.end()
716
718
717 @command(b'perfancestors', formatteropts)
719 @command(b'perfancestors', formatteropts)
718 def perfancestors(ui, repo, **opts):
720 def perfancestors(ui, repo, **opts):
719 opts = _byteskwargs(opts)
721 opts = _byteskwargs(opts)
720 timer, fm = gettimer(ui, opts)
722 timer, fm = gettimer(ui, opts)
721 heads = repo.changelog.headrevs()
723 heads = repo.changelog.headrevs()
722 def d():
724 def d():
723 for a in repo.changelog.ancestors(heads):
725 for a in repo.changelog.ancestors(heads):
724 pass
726 pass
725 timer(d)
727 timer(d)
726 fm.end()
728 fm.end()
727
729
728 @command(b'perfancestorset', formatteropts)
730 @command(b'perfancestorset', formatteropts)
729 def perfancestorset(ui, repo, revset, **opts):
731 def perfancestorset(ui, repo, revset, **opts):
730 opts = _byteskwargs(opts)
732 opts = _byteskwargs(opts)
731 timer, fm = gettimer(ui, opts)
733 timer, fm = gettimer(ui, opts)
732 revs = repo.revs(revset)
734 revs = repo.revs(revset)
733 heads = repo.changelog.headrevs()
735 heads = repo.changelog.headrevs()
734 def d():
736 def d():
735 s = repo.changelog.ancestors(heads)
737 s = repo.changelog.ancestors(heads)
736 for rev in revs:
738 for rev in revs:
737 rev in s
739 rev in s
738 timer(d)
740 timer(d)
739 fm.end()
741 fm.end()
740
742
741 @command(b'perfdiscovery', formatteropts, b'PATH')
743 @command(b'perfdiscovery', formatteropts, b'PATH')
742 def perfdiscovery(ui, repo, path, **opts):
744 def perfdiscovery(ui, repo, path, **opts):
743 """benchmark discovery between local repo and the peer at given path
745 """benchmark discovery between local repo and the peer at given path
744 """
746 """
745 repos = [repo, None]
747 repos = [repo, None]
746 timer, fm = gettimer(ui, opts)
748 timer, fm = gettimer(ui, opts)
747 path = ui.expandpath(path)
749 path = ui.expandpath(path)
748
750
749 def s():
751 def s():
750 repos[1] = hg.peer(ui, opts, path)
752 repos[1] = hg.peer(ui, opts, path)
751 def d():
753 def d():
752 setdiscovery.findcommonheads(ui, *repos)
754 setdiscovery.findcommonheads(ui, *repos)
753 timer(d, setup=s)
755 timer(d, setup=s)
754 fm.end()
756 fm.end()
755
757
756 @command(b'perfbookmarks', formatteropts +
758 @command(b'perfbookmarks', formatteropts +
757 [
759 [
758 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
760 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
759 ])
761 ])
760 def perfbookmarks(ui, repo, **opts):
762 def perfbookmarks(ui, repo, **opts):
761 """benchmark parsing bookmarks from disk to memory"""
763 """benchmark parsing bookmarks from disk to memory"""
762 opts = _byteskwargs(opts)
764 opts = _byteskwargs(opts)
763 timer, fm = gettimer(ui, opts)
765 timer, fm = gettimer(ui, opts)
764
766
765 clearrevlogs = opts[b'clear_revlogs']
767 clearrevlogs = opts[b'clear_revlogs']
766 def s():
768 def s():
767 if clearrevlogs:
769 if clearrevlogs:
768 clearchangelog(repo)
770 clearchangelog(repo)
769 clearfilecache(repo, b'_bookmarks')
771 clearfilecache(repo, b'_bookmarks')
770 def d():
772 def d():
771 repo._bookmarks
773 repo._bookmarks
772 timer(d, setup=s)
774 timer(d, setup=s)
773 fm.end()
775 fm.end()
774
776
775 @command(b'perfbundleread', formatteropts, b'BUNDLE')
777 @command(b'perfbundleread', formatteropts, b'BUNDLE')
776 def perfbundleread(ui, repo, bundlepath, **opts):
778 def perfbundleread(ui, repo, bundlepath, **opts):
777 """Benchmark reading of bundle files.
779 """Benchmark reading of bundle files.
778
780
779 This command is meant to isolate the I/O part of bundle reading as
781 This command is meant to isolate the I/O part of bundle reading as
780 much as possible.
782 much as possible.
781 """
783 """
782 from mercurial import (
784 from mercurial import (
783 bundle2,
785 bundle2,
784 exchange,
786 exchange,
785 streamclone,
787 streamclone,
786 )
788 )
787
789
788 opts = _byteskwargs(opts)
790 opts = _byteskwargs(opts)
789
791
790 def makebench(fn):
792 def makebench(fn):
791 def run():
793 def run():
792 with open(bundlepath, b'rb') as fh:
794 with open(bundlepath, b'rb') as fh:
793 bundle = exchange.readbundle(ui, fh, bundlepath)
795 bundle = exchange.readbundle(ui, fh, bundlepath)
794 fn(bundle)
796 fn(bundle)
795
797
796 return run
798 return run
797
799
798 def makereadnbytes(size):
800 def makereadnbytes(size):
799 def run():
801 def run():
800 with open(bundlepath, b'rb') as fh:
802 with open(bundlepath, b'rb') as fh:
801 bundle = exchange.readbundle(ui, fh, bundlepath)
803 bundle = exchange.readbundle(ui, fh, bundlepath)
802 while bundle.read(size):
804 while bundle.read(size):
803 pass
805 pass
804
806
805 return run
807 return run
806
808
807 def makestdioread(size):
809 def makestdioread(size):
808 def run():
810 def run():
809 with open(bundlepath, b'rb') as fh:
811 with open(bundlepath, b'rb') as fh:
810 while fh.read(size):
812 while fh.read(size):
811 pass
813 pass
812
814
813 return run
815 return run
814
816
815 # bundle1
817 # bundle1
816
818
817 def deltaiter(bundle):
819 def deltaiter(bundle):
818 for delta in bundle.deltaiter():
820 for delta in bundle.deltaiter():
819 pass
821 pass
820
822
821 def iterchunks(bundle):
823 def iterchunks(bundle):
822 for chunk in bundle.getchunks():
824 for chunk in bundle.getchunks():
823 pass
825 pass
824
826
825 # bundle2
827 # bundle2
826
828
827 def forwardchunks(bundle):
829 def forwardchunks(bundle):
828 for chunk in bundle._forwardchunks():
830 for chunk in bundle._forwardchunks():
829 pass
831 pass
830
832
831 def iterparts(bundle):
833 def iterparts(bundle):
832 for part in bundle.iterparts():
834 for part in bundle.iterparts():
833 pass
835 pass
834
836
835 def iterpartsseekable(bundle):
837 def iterpartsseekable(bundle):
836 for part in bundle.iterparts(seekable=True):
838 for part in bundle.iterparts(seekable=True):
837 pass
839 pass
838
840
839 def seek(bundle):
841 def seek(bundle):
840 for part in bundle.iterparts(seekable=True):
842 for part in bundle.iterparts(seekable=True):
841 part.seek(0, os.SEEK_END)
843 part.seek(0, os.SEEK_END)
842
844
843 def makepartreadnbytes(size):
845 def makepartreadnbytes(size):
844 def run():
846 def run():
845 with open(bundlepath, b'rb') as fh:
847 with open(bundlepath, b'rb') as fh:
846 bundle = exchange.readbundle(ui, fh, bundlepath)
848 bundle = exchange.readbundle(ui, fh, bundlepath)
847 for part in bundle.iterparts():
849 for part in bundle.iterparts():
848 while part.read(size):
850 while part.read(size):
849 pass
851 pass
850
852
851 return run
853 return run
852
854
853 benches = [
855 benches = [
854 (makestdioread(8192), b'read(8k)'),
856 (makestdioread(8192), b'read(8k)'),
855 (makestdioread(16384), b'read(16k)'),
857 (makestdioread(16384), b'read(16k)'),
856 (makestdioread(32768), b'read(32k)'),
858 (makestdioread(32768), b'read(32k)'),
857 (makestdioread(131072), b'read(128k)'),
859 (makestdioread(131072), b'read(128k)'),
858 ]
860 ]
859
861
860 with open(bundlepath, b'rb') as fh:
862 with open(bundlepath, b'rb') as fh:
861 bundle = exchange.readbundle(ui, fh, bundlepath)
863 bundle = exchange.readbundle(ui, fh, bundlepath)
862
864
863 if isinstance(bundle, changegroup.cg1unpacker):
865 if isinstance(bundle, changegroup.cg1unpacker):
864 benches.extend([
866 benches.extend([
865 (makebench(deltaiter), b'cg1 deltaiter()'),
867 (makebench(deltaiter), b'cg1 deltaiter()'),
866 (makebench(iterchunks), b'cg1 getchunks()'),
868 (makebench(iterchunks), b'cg1 getchunks()'),
867 (makereadnbytes(8192), b'cg1 read(8k)'),
869 (makereadnbytes(8192), b'cg1 read(8k)'),
868 (makereadnbytes(16384), b'cg1 read(16k)'),
870 (makereadnbytes(16384), b'cg1 read(16k)'),
869 (makereadnbytes(32768), b'cg1 read(32k)'),
871 (makereadnbytes(32768), b'cg1 read(32k)'),
870 (makereadnbytes(131072), b'cg1 read(128k)'),
872 (makereadnbytes(131072), b'cg1 read(128k)'),
871 ])
873 ])
872 elif isinstance(bundle, bundle2.unbundle20):
874 elif isinstance(bundle, bundle2.unbundle20):
873 benches.extend([
875 benches.extend([
874 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
876 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
875 (makebench(iterparts), b'bundle2 iterparts()'),
877 (makebench(iterparts), b'bundle2 iterparts()'),
876 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
878 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
877 (makebench(seek), b'bundle2 part seek()'),
879 (makebench(seek), b'bundle2 part seek()'),
878 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
880 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
879 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
881 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
880 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
882 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
881 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
883 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
882 ])
884 ])
883 elif isinstance(bundle, streamclone.streamcloneapplier):
885 elif isinstance(bundle, streamclone.streamcloneapplier):
884 raise error.Abort(b'stream clone bundles not supported')
886 raise error.Abort(b'stream clone bundles not supported')
885 else:
887 else:
886 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
888 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
887
889
888 for fn, title in benches:
890 for fn, title in benches:
889 timer, fm = gettimer(ui, opts)
891 timer, fm = gettimer(ui, opts)
890 timer(fn, title=title)
892 timer(fn, title=title)
891 fm.end()
893 fm.end()
892
894
893 @command(b'perfchangegroupchangelog', formatteropts +
895 @command(b'perfchangegroupchangelog', formatteropts +
894 [(b'', b'cgversion', b'02', b'changegroup version'),
896 [(b'', b'cgversion', b'02', b'changegroup version'),
895 (b'r', b'rev', b'', b'revisions to add to changegroup')])
897 (b'r', b'rev', b'', b'revisions to add to changegroup')])
896 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
898 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
897 """Benchmark producing a changelog group for a changegroup.
899 """Benchmark producing a changelog group for a changegroup.
898
900
899 This measures the time spent processing the changelog during a
901 This measures the time spent processing the changelog during a
900 bundle operation. This occurs during `hg bundle` and on a server
902 bundle operation. This occurs during `hg bundle` and on a server
901 processing a `getbundle` wire protocol request (handles clones
903 processing a `getbundle` wire protocol request (handles clones
902 and pull requests).
904 and pull requests).
903
905
904 By default, all revisions are added to the changegroup.
906 By default, all revisions are added to the changegroup.
905 """
907 """
906 opts = _byteskwargs(opts)
908 opts = _byteskwargs(opts)
907 cl = repo.changelog
909 cl = repo.changelog
908 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
910 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
909 bundler = changegroup.getbundler(cgversion, repo)
911 bundler = changegroup.getbundler(cgversion, repo)
910
912
911 def d():
913 def d():
912 state, chunks = bundler._generatechangelog(cl, nodes)
914 state, chunks = bundler._generatechangelog(cl, nodes)
913 for chunk in chunks:
915 for chunk in chunks:
914 pass
916 pass
915
917
916 timer, fm = gettimer(ui, opts)
918 timer, fm = gettimer(ui, opts)
917
919
918 # Terminal printing can interfere with timing. So disable it.
920 # Terminal printing can interfere with timing. So disable it.
919 with ui.configoverride({(b'progress', b'disable'): True}):
921 with ui.configoverride({(b'progress', b'disable'): True}):
920 timer(d)
922 timer(d)
921
923
922 fm.end()
924 fm.end()
923
925
924 @command(b'perfdirs', formatteropts)
926 @command(b'perfdirs', formatteropts)
925 def perfdirs(ui, repo, **opts):
927 def perfdirs(ui, repo, **opts):
926 opts = _byteskwargs(opts)
928 opts = _byteskwargs(opts)
927 timer, fm = gettimer(ui, opts)
929 timer, fm = gettimer(ui, opts)
928 dirstate = repo.dirstate
930 dirstate = repo.dirstate
929 b'a' in dirstate
931 b'a' in dirstate
930 def d():
932 def d():
931 dirstate.hasdir(b'a')
933 dirstate.hasdir(b'a')
932 del dirstate._map._dirs
934 del dirstate._map._dirs
933 timer(d)
935 timer(d)
934 fm.end()
936 fm.end()
935
937
936 @command(b'perfdirstate', formatteropts)
938 @command(b'perfdirstate', formatteropts)
937 def perfdirstate(ui, repo, **opts):
939 def perfdirstate(ui, repo, **opts):
938 opts = _byteskwargs(opts)
940 opts = _byteskwargs(opts)
939 timer, fm = gettimer(ui, opts)
941 timer, fm = gettimer(ui, opts)
940 b"a" in repo.dirstate
942 b"a" in repo.dirstate
941 def d():
943 def d():
942 repo.dirstate.invalidate()
944 repo.dirstate.invalidate()
943 b"a" in repo.dirstate
945 b"a" in repo.dirstate
944 timer(d)
946 timer(d)
945 fm.end()
947 fm.end()
946
948
947 @command(b'perfdirstatedirs', formatteropts)
949 @command(b'perfdirstatedirs', formatteropts)
948 def perfdirstatedirs(ui, repo, **opts):
950 def perfdirstatedirs(ui, repo, **opts):
949 opts = _byteskwargs(opts)
951 opts = _byteskwargs(opts)
950 timer, fm = gettimer(ui, opts)
952 timer, fm = gettimer(ui, opts)
951 b"a" in repo.dirstate
953 b"a" in repo.dirstate
952 def d():
954 def d():
953 repo.dirstate.hasdir(b"a")
955 repo.dirstate.hasdir(b"a")
954 del repo.dirstate._map._dirs
956 del repo.dirstate._map._dirs
955 timer(d)
957 timer(d)
956 fm.end()
958 fm.end()
957
959
958 @command(b'perfdirstatefoldmap', formatteropts)
960 @command(b'perfdirstatefoldmap', formatteropts)
959 def perfdirstatefoldmap(ui, repo, **opts):
961 def perfdirstatefoldmap(ui, repo, **opts):
960 opts = _byteskwargs(opts)
962 opts = _byteskwargs(opts)
961 timer, fm = gettimer(ui, opts)
963 timer, fm = gettimer(ui, opts)
962 dirstate = repo.dirstate
964 dirstate = repo.dirstate
963 b'a' in dirstate
965 b'a' in dirstate
964 def d():
966 def d():
965 dirstate._map.filefoldmap.get(b'a')
967 dirstate._map.filefoldmap.get(b'a')
966 del dirstate._map.filefoldmap
968 del dirstate._map.filefoldmap
967 timer(d)
969 timer(d)
968 fm.end()
970 fm.end()
969
971
970 @command(b'perfdirfoldmap', formatteropts)
972 @command(b'perfdirfoldmap', formatteropts)
971 def perfdirfoldmap(ui, repo, **opts):
973 def perfdirfoldmap(ui, repo, **opts):
972 opts = _byteskwargs(opts)
974 opts = _byteskwargs(opts)
973 timer, fm = gettimer(ui, opts)
975 timer, fm = gettimer(ui, opts)
974 dirstate = repo.dirstate
976 dirstate = repo.dirstate
975 b'a' in dirstate
977 b'a' in dirstate
976 def d():
978 def d():
977 dirstate._map.dirfoldmap.get(b'a')
979 dirstate._map.dirfoldmap.get(b'a')
978 del dirstate._map.dirfoldmap
980 del dirstate._map.dirfoldmap
979 del dirstate._map._dirs
981 del dirstate._map._dirs
980 timer(d)
982 timer(d)
981 fm.end()
983 fm.end()
982
984
983 @command(b'perfdirstatewrite', formatteropts)
985 @command(b'perfdirstatewrite', formatteropts)
984 def perfdirstatewrite(ui, repo, **opts):
986 def perfdirstatewrite(ui, repo, **opts):
985 opts = _byteskwargs(opts)
987 opts = _byteskwargs(opts)
986 timer, fm = gettimer(ui, opts)
988 timer, fm = gettimer(ui, opts)
987 ds = repo.dirstate
989 ds = repo.dirstate
988 b"a" in ds
990 b"a" in ds
989 def d():
991 def d():
990 ds._dirty = True
992 ds._dirty = True
991 ds.write(repo.currenttransaction())
993 ds.write(repo.currenttransaction())
992 timer(d)
994 timer(d)
993 fm.end()
995 fm.end()
994
996
995 def _getmergerevs(repo, opts):
997 def _getmergerevs(repo, opts):
996 """parse command argument to return rev involved in merge
998 """parse command argument to return rev involved in merge
997
999
998 input: options dictionnary with `rev`, `from` and `bse`
1000 input: options dictionnary with `rev`, `from` and `bse`
999 output: (localctx, otherctx, basectx)
1001 output: (localctx, otherctx, basectx)
1000 """
1002 """
1001 if opts[b'from']:
1003 if opts[b'from']:
1002 fromrev = scmutil.revsingle(repo, opts[b'from'])
1004 fromrev = scmutil.revsingle(repo, opts[b'from'])
1003 wctx = repo[fromrev]
1005 wctx = repo[fromrev]
1004 else:
1006 else:
1005 wctx = repo[None]
1007 wctx = repo[None]
1006 # we don't want working dir files to be stat'd in the benchmark, so
1008 # we don't want working dir files to be stat'd in the benchmark, so
1007 # prime that cache
1009 # prime that cache
1008 wctx.dirty()
1010 wctx.dirty()
1009 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1011 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1010 if opts[b'base']:
1012 if opts[b'base']:
1011 fromrev = scmutil.revsingle(repo, opts[b'base'])
1013 fromrev = scmutil.revsingle(repo, opts[b'base'])
1012 ancestor = repo[fromrev]
1014 ancestor = repo[fromrev]
1013 else:
1015 else:
1014 ancestor = wctx.ancestor(rctx)
1016 ancestor = wctx.ancestor(rctx)
1015 return (wctx, rctx, ancestor)
1017 return (wctx, rctx, ancestor)
1016
1018
1017 @command(b'perfmergecalculate',
1019 @command(b'perfmergecalculate',
1018 [
1020 [
1019 (b'r', b'rev', b'.', b'rev to merge against'),
1021 (b'r', b'rev', b'.', b'rev to merge against'),
1020 (b'', b'from', b'', b'rev to merge from'),
1022 (b'', b'from', b'', b'rev to merge from'),
1021 (b'', b'base', b'', b'the revision to use as base'),
1023 (b'', b'base', b'', b'the revision to use as base'),
1022 ] + formatteropts)
1024 ] + formatteropts)
1023 def perfmergecalculate(ui, repo, **opts):
1025 def perfmergecalculate(ui, repo, **opts):
1024 opts = _byteskwargs(opts)
1026 opts = _byteskwargs(opts)
1025 timer, fm = gettimer(ui, opts)
1027 timer, fm = gettimer(ui, opts)
1026
1028
1027 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1029 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1028 def d():
1030 def d():
1029 # acceptremote is True because we don't want prompts in the middle of
1031 # acceptremote is True because we don't want prompts in the middle of
1030 # our benchmark
1032 # our benchmark
1031 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
1033 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
1032 acceptremote=True, followcopies=True)
1034 acceptremote=True, followcopies=True)
1033 timer(d)
1035 timer(d)
1034 fm.end()
1036 fm.end()
1035
1037
@command(b'perfmergecopies',
         [
             (b'r', b'rev', b'.', b'rev to merge against'),
             (b'', b'from', b'', b'rev to merge from'),
             (b'', b'base', b'', b'the revision to use as base'),
         ] + formatteropts)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def runone():
        # run the copy-tracing step of a merge in isolation
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(runone)
    fm.end()
1053
1055
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve both endpoints once, outside the timed section
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def runone():
        copies.pathcopies(ctx1, ctx2)

    timer(runone)
    fm.end()
1065
1067
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
          ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def runone():
        phases = _phases
        if full:
            # with --full, also pay the cost of re-reading the phaseroots
            # file by dropping the cached filecache entry
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(runone)
    fm.end()
1084
1086
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    # count the remote phase roots that are known locally and non-public
    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)

    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)

    timer(d)
    fm.end()
1140
1142
@command(b'perfmanifest', [
    (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
    (b'', b'clear-disk', False, b'clear on-disk caches too'),
] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset; derive its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hex manifest node
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # getstorage appeared in modern Mercurial; fall back to the
                # private _revlog attribute on older versions
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')

    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1176
1178
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    # Benchmark reading one changeset's full data from the changelog.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()

    def d():
        repo.changelog.read(n)
        #repo.changelog._cache = None

    timer(d)
    fm.end()
1187
1189
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # drop any cached ignore matcher so each run rebuilds it
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1204
1206
@command(b'perfindex', [
    (b'', b'rev', [], b'revision to be looked up (default tip)'),
    (b'', b'no-lookup', None, b'do not revision lookup post creation'),
] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # fix: _byteskwargs() turned every option key into bytes, so the key
        # must be b'rev' (a native-str key raised KeyError on Python 3), and
        # error.Abort messages are bytes everywhere else in this file
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)
    timer(d, setup=setup)
    fm.end()
1258
1260
@command(b'perfnodemap', [
    (b'', b'rev', [], b'revision to be looked up (default tip)'),
    (b'', b'clear-caches', True, b'clear revlog cache between calls'),
] + formatteropts)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # fix: option keys are bytes after _byteskwargs(); the former native-str
    # key 'clear_caches' raised KeyError on Python 3.  Likewise the Abort
    # message below is now bytes, matching the rest of this file.
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]
    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        def setup():
            setnodeget()
    else:
        setnodeget()
        d() # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1317
1319
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    # Benchmark spawning a fresh `hg version -q` process with a blank
    # HGRCPATH, i.e. interpreter + extension startup cost.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def runone():
        if os.name != r'nt':
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
        else:
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])

    timer(runone)
    fm.end()
1331
1333
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    # resolve the node ids up front so only parents() is timed
    nodelist = [repo.changelog.node(i) for i in _xrange(count)]

    def runone():
        for n in nodelist:
            repo.changelog.parents(n)

    timer(runone)
    fm.end()
1355
1357
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    # Benchmark computing the file list of changeset `x` through the
    # context layer (repo[x].files()).
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)

    def runone():
        len(repo[x].files())

    timer(runone)
    fm.end()
1365
1367
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    # Benchmark reading the raw files list of changeset `x` straight from
    # the changelog entry (field 3), bypassing the context layer.
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def runone():
        len(cl.read(x)[3])

    timer(runone)
    fm.end()
1376
1378
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    # Benchmark resolving a revision identifier via repo.lookup().
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()
1383
1385
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
          ], norepo=True)
def perflinelogedits(ui, **opts):
    # Benchmark replaying a deterministic, randomly generated sequence of
    # hunk edits into a fresh linelog.
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    random.seed(0)  # fixed seed: identical edit stream on every run
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def runone():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(runone)
    fm.end()
1417
1419
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    # Benchmark parsing and resolving a set of revset specs.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()
1425
1427
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    # Benchmark resolving a node to a revision number on a revlog whose
    # caches are cleared between runs.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")

    def runone():
        cl.rev(n)
        clearcaches(cl)

    timer(runone)
    fm.end()
1439
1441
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
          ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    # Benchmark a full `hg log`, optionally with copy/rename tracking,
    # with the output captured so printing does not dominate the timing.
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
                               copies=opts.get(b'rename')))
    ui.popbuffer()
    fm.end()
1453
1455
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            # read changelog data (in addition to the index)
            ctx.branch()

    timer(moonwalk)
    fm.end()
1468
1470
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
          ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render into a ui whose output is discarded so I/O is not measured
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1502
1504
1503 @command(b'perfhelper-mergecopies', formatteropts +
1505 @command(b'perfhelper-mergecopies', formatteropts +
1504 [
1506 [
1505 (b'r', b'revs', [], b'restrict search to these revisions'),
1507 (b'r', b'revs', [], b'restrict search to these revisions'),
1506 (b'', b'timing', False, b'provides extra data (costly)'),
1508 (b'', b'timing', False, b'provides extra data (costly)'),
1507 ])
1509 ])
1508 def perfhelpermergecopies(ui, repo, revs=[], **opts):
1510 def perfhelpermergecopies(ui, repo, revs=[], **opts):
1509 """find statistics about potential parameters for `perfmergecopies`
1511 """find statistics about potential parameters for `perfmergecopies`
1510
1512
1511 This command find (base, p1, p2) triplet relevant for copytracing
1513 This command find (base, p1, p2) triplet relevant for copytracing
1512 benchmarking in the context of a merge. It reports values for some of the
1514 benchmarking in the context of a merge. It reports values for some of the
1513 parameters that impact merge copy tracing time during merge.
1515 parameters that impact merge copy tracing time during merge.
1514
1516
1515 If `--timing` is set, rename detection is run and the associated timing
1517 If `--timing` is set, rename detection is run and the associated timing
1516 will be reported. The extra details come at the cost of slower command
1518 will be reported. The extra details come at the cost of slower command
1517 execution.
1519 execution.
1518
1520
1519 Since rename detection is only run once, other factors might easily
1521 Since rename detection is only run once, other factors might easily
1520 affect the precision of the timing. However it should give a good
1522 affect the precision of the timing. However it should give a good
1521 approximation of which revision triplets are very costly.
1523 approximation of which revision triplets are very costly.
1522 """
1524 """
1523 opts = _byteskwargs(opts)
1525 opts = _byteskwargs(opts)
1524 fm = ui.formatter(b'perf', opts)
1526 fm = ui.formatter(b'perf', opts)
1525 dotiming = opts[b'timing']
1527 dotiming = opts[b'timing']
1526
1528
1527 output_template = [
1529 output_template = [
1528 ("base", "%(base)12s"),
1530 ("base", "%(base)12s"),
1529 ("p1", "%(p1.node)12s"),
1531 ("p1", "%(p1.node)12s"),
1530 ("p2", "%(p2.node)12s"),
1532 ("p2", "%(p2.node)12s"),
1531 ("p1.nb-revs", "%(p1.nbrevs)12d"),
1533 ("p1.nb-revs", "%(p1.nbrevs)12d"),
1532 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
1534 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
1533 ("p1.renames", "%(p1.renamedfiles)12d"),
1535 ("p1.renames", "%(p1.renamedfiles)12d"),
1534 ("p1.time", "%(p1.time)12.3f"),
1536 ("p1.time", "%(p1.time)12.3f"),
1535 ("p2.nb-revs", "%(p2.nbrevs)12d"),
1537 ("p2.nb-revs", "%(p2.nbrevs)12d"),
1536 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
1538 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
1537 ("p2.renames", "%(p2.renamedfiles)12d"),
1539 ("p2.renames", "%(p2.renamedfiles)12d"),
1538 ("p2.time", "%(p2.time)12.3f"),
1540 ("p2.time", "%(p2.time)12.3f"),
1539 ("renames", "%(nbrenamedfiles)12d"),
1541 ("renames", "%(nbrenamedfiles)12d"),
1540 ("total.time", "%(time)12.3f"),
1542 ("total.time", "%(time)12.3f"),
1541 ]
1543 ]
1542 if not dotiming:
1544 if not dotiming:
1543 output_template = [i for i in output_template
1545 output_template = [i for i in output_template
1544 if not ('time' in i[0] or 'renames' in i[0])]
1546 if not ('time' in i[0] or 'renames' in i[0])]
1545 header_names = [h for (h, v) in output_template]
1547 header_names = [h for (h, v) in output_template]
1546 output = ' '.join([v for (h, v) in output_template]) + '\n'
1548 output = ' '.join([v for (h, v) in output_template]) + '\n'
1547 header = ' '.join(['%12s'] * len(header_names)) + '\n'
1549 header = ' '.join(['%12s'] * len(header_names)) + '\n'
1548 fm.plain(header % tuple(header_names))
1550 fm.plain(header % tuple(header_names))
1549
1551
1550 if not revs:
1552 if not revs:
1551 revs = ['all()']
1553 revs = ['all()']
1552 revs = scmutil.revrange(repo, revs)
1554 revs = scmutil.revrange(repo, revs)
1553
1555
1554 roi = repo.revs('merge() and %ld', revs)
1556 roi = repo.revs('merge() and %ld', revs)
1555 for r in roi:
1557 for r in roi:
1556 ctx = repo[r]
1558 ctx = repo[r]
1557 p1 = ctx.p1()
1559 p1 = ctx.p1()
1558 p2 = ctx.p2()
1560 p2 = ctx.p2()
1559 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
1561 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
1560 for b in bases:
1562 for b in bases:
1561 b = repo[b]
1563 b = repo[b]
1562 p1missing = copies._computeforwardmissing(b, p1)
1564 p1missing = copies._computeforwardmissing(b, p1)
1563 p2missing = copies._computeforwardmissing(b, p2)
1565 p2missing = copies._computeforwardmissing(b, p2)
1564 data = {
1566 data = {
1565 b'base': b.hex(),
1567 b'base': b.hex(),
1566 b'p1.node': p1.hex(),
1568 b'p1.node': p1.hex(),
1567 b'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())),
1569 b'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())),
1568 b'p1.nbmissingfiles': len(p1missing),
1570 b'p1.nbmissingfiles': len(p1missing),
1569 b'p2.node': p2.hex(),
1571 b'p2.node': p2.hex(),
1570 b'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())),
1572 b'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())),
1571 b'p2.nbmissingfiles': len(p2missing),
1573 b'p2.nbmissingfiles': len(p2missing),
1572 }
1574 }
1573 if dotiming:
1575 if dotiming:
1574 begin = util.timer()
1576 begin = util.timer()
1575 mergedata = copies.mergecopies(repo, p1, p2, b)
1577 mergedata = copies.mergecopies(repo, p1, p2, b)
1576 end = util.timer()
1578 end = util.timer()
1577 # not very stable timing since we did only one run
1579 # not very stable timing since we did only one run
1578 data['time'] = end - begin
1580 data['time'] = end - begin
1579 # mergedata contains five dicts: "copy", "movewithdir",
1581 # mergedata contains five dicts: "copy", "movewithdir",
1580 # "diverge", "renamedelete" and "dirmove".
1582 # "diverge", "renamedelete" and "dirmove".
1581 # The first 4 are about renamed file so lets count that.
1583 # The first 4 are about renamed file so lets count that.
1582 renames = len(mergedata[0])
1584 renames = len(mergedata[0])
1583 renames += len(mergedata[1])
1585 renames += len(mergedata[1])
1584 renames += len(mergedata[2])
1586 renames += len(mergedata[2])
1585 renames += len(mergedata[3])
1587 renames += len(mergedata[3])
1586 data['nbrenamedfiles'] = renames
1588 data['nbrenamedfiles'] = renames
1587 begin = util.timer()
1589 begin = util.timer()
1588 p1renames = copies.pathcopies(b, p1)
1590 p1renames = copies.pathcopies(b, p1)
1589 end = util.timer()
1591 end = util.timer()
1590 data['p1.time'] = end - begin
1592 data['p1.time'] = end - begin
1591 begin = util.timer()
1593 begin = util.timer()
1592 p2renames = copies.pathcopies(b, p2)
1594 p2renames = copies.pathcopies(b, p2)
1593 data['p2.time'] = end - begin
1595 data['p2.time'] = end - begin
1594 end = util.timer()
1596 end = util.timer()
1595 data['p1.renamedfiles'] = len(p1renames)
1597 data['p1.renamedfiles'] = len(p1renames)
1596 data['p2.renamedfiles'] = len(p2renames)
1598 data['p2.renamedfiles'] = len(p2renames)
1597 fm.startitem()
1599 fm.startitem()
1598 fm.data(**data)
1600 fm.data(**data)
1599 # make node pretty for the human output
1601 # make node pretty for the human output
1600 out = data.copy()
1602 out = data.copy()
1601 out['base'] = fm.hexfunc(b.node())
1603 out['base'] = fm.hexfunc(b.node())
1602 out['p1.node'] = fm.hexfunc(p1.node())
1604 out['p1.node'] = fm.hexfunc(p1.node())
1603 out['p2.node'] = fm.hexfunc(p2.node())
1605 out['p2.node'] = fm.hexfunc(p2.node())
1604 fm.plain(output % out)
1606 fm.plain(output % out)
1605
1607
1606 fm.end()
1608 fm.end()
1607
1609
1608 @command(b'perfhelper-pathcopies', formatteropts +
1610 @command(b'perfhelper-pathcopies', formatteropts +
1609 [
1611 [
1610 (b'r', b'revs', [], b'restrict search to these revisions'),
1612 (b'r', b'revs', [], b'restrict search to these revisions'),
1611 (b'', b'timing', False, b'provides extra data (costly)'),
1613 (b'', b'timing', False, b'provides extra data (costly)'),
1612 ])
1614 ])
1613 def perfhelperpathcopies(ui, repo, revs=[], **opts):
1615 def perfhelperpathcopies(ui, repo, revs=[], **opts):
1614 """find statistic about potential parameters for the `perftracecopies`
1616 """find statistic about potential parameters for the `perftracecopies`
1615
1617
1616 This command find source-destination pair relevant for copytracing testing.
1618 This command find source-destination pair relevant for copytracing testing.
1617 It report value for some of the parameters that impact copy tracing time.
1619 It report value for some of the parameters that impact copy tracing time.
1618
1620
1619 If `--timing` is set, rename detection is run and the associated timing
1621 If `--timing` is set, rename detection is run and the associated timing
1620 will be reported. The extra details comes at the cost of a slower command
1622 will be reported. The extra details comes at the cost of a slower command
1621 execution.
1623 execution.
1622
1624
1623 Since the rename detection is only run once, other factors might easily
1625 Since the rename detection is only run once, other factors might easily
1624 affect the precision of the timing. However it should give a good
1626 affect the precision of the timing. However it should give a good
1625 approximation of which revision pairs are very costly.
1627 approximation of which revision pairs are very costly.
1626 """
1628 """
1627 opts = _byteskwargs(opts)
1629 opts = _byteskwargs(opts)
1628 fm = ui.formatter(b'perf', opts)
1630 fm = ui.formatter(b'perf', opts)
1629 dotiming = opts[b'timing']
1631 dotiming = opts[b'timing']
1630
1632
1631 if dotiming:
1633 if dotiming:
1632 header = '%12s %12s %12s %12s %12s %12s\n'
1634 header = '%12s %12s %12s %12s %12s %12s\n'
1633 output = ("%(source)12s %(destination)12s "
1635 output = ("%(source)12s %(destination)12s "
1634 "%(nbrevs)12d %(nbmissingfiles)12d "
1636 "%(nbrevs)12d %(nbmissingfiles)12d "
1635 "%(nbrenamedfiles)12d %(time)18.5f\n")
1637 "%(nbrenamedfiles)12d %(time)18.5f\n")
1636 header_names = ("source", "destination", "nb-revs", "nb-files",
1638 header_names = ("source", "destination", "nb-revs", "nb-files",
1637 "nb-renames", "time")
1639 "nb-renames", "time")
1638 fm.plain(header % header_names)
1640 fm.plain(header % header_names)
1639 else:
1641 else:
1640 header = '%12s %12s %12s %12s\n'
1642 header = '%12s %12s %12s %12s\n'
1641 output = ("%(source)12s %(destination)12s "
1643 output = ("%(source)12s %(destination)12s "
1642 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1644 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1643 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1645 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1644
1646
1645 if not revs:
1647 if not revs:
1646 revs = ['all()']
1648 revs = ['all()']
1647 revs = scmutil.revrange(repo, revs)
1649 revs = scmutil.revrange(repo, revs)
1648
1650
1649 roi = repo.revs('merge() and %ld', revs)
1651 roi = repo.revs('merge() and %ld', revs)
1650 for r in roi:
1652 for r in roi:
1651 ctx = repo[r]
1653 ctx = repo[r]
1652 p1 = ctx.p1().rev()
1654 p1 = ctx.p1().rev()
1653 p2 = ctx.p2().rev()
1655 p2 = ctx.p2().rev()
1654 bases = repo.changelog._commonancestorsheads(p1, p2)
1656 bases = repo.changelog._commonancestorsheads(p1, p2)
1655 for p in (p1, p2):
1657 for p in (p1, p2):
1656 for b in bases:
1658 for b in bases:
1657 base = repo[b]
1659 base = repo[b]
1658 parent = repo[p]
1660 parent = repo[p]
1659 missing = copies._computeforwardmissing(base, parent)
1661 missing = copies._computeforwardmissing(base, parent)
1660 if not missing:
1662 if not missing:
1661 continue
1663 continue
1662 data = {
1664 data = {
1663 b'source': base.hex(),
1665 b'source': base.hex(),
1664 b'destination': parent.hex(),
1666 b'destination': parent.hex(),
1665 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1667 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1666 b'nbmissingfiles': len(missing),
1668 b'nbmissingfiles': len(missing),
1667 }
1669 }
1668 if dotiming:
1670 if dotiming:
1669 begin = util.timer()
1671 begin = util.timer()
1670 renames = copies.pathcopies(base, parent)
1672 renames = copies.pathcopies(base, parent)
1671 end = util.timer()
1673 end = util.timer()
1672 # not very stable timing since we did only one run
1674 # not very stable timing since we did only one run
1673 data['time'] = end - begin
1675 data['time'] = end - begin
1674 data['nbrenamedfiles'] = len(renames)
1676 data['nbrenamedfiles'] = len(renames)
1675 fm.startitem()
1677 fm.startitem()
1676 fm.data(**data)
1678 fm.data(**data)
1677 out = data.copy()
1679 out = data.copy()
1678 out['source'] = fm.hexfunc(base.node())
1680 out['source'] = fm.hexfunc(base.node())
1679 out['destination'] = fm.hexfunc(parent.node())
1681 out['destination'] = fm.hexfunc(parent.node())
1680 fm.plain(output % out)
1682 fm.plain(output % out)
1681
1683
1682 fm.end()
1684 fm.end()
1683
1685
1684 @command(b'perfcca', formatteropts)
1686 @command(b'perfcca', formatteropts)
1685 def perfcca(ui, repo, **opts):
1687 def perfcca(ui, repo, **opts):
1686 opts = _byteskwargs(opts)
1688 opts = _byteskwargs(opts)
1687 timer, fm = gettimer(ui, opts)
1689 timer, fm = gettimer(ui, opts)
1688 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1690 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1689 fm.end()
1691 fm.end()
1690
1692
1691 @command(b'perffncacheload', formatteropts)
1693 @command(b'perffncacheload', formatteropts)
1692 def perffncacheload(ui, repo, **opts):
1694 def perffncacheload(ui, repo, **opts):
1693 opts = _byteskwargs(opts)
1695 opts = _byteskwargs(opts)
1694 timer, fm = gettimer(ui, opts)
1696 timer, fm = gettimer(ui, opts)
1695 s = repo.store
1697 s = repo.store
1696 def d():
1698 def d():
1697 s.fncache._load()
1699 s.fncache._load()
1698 timer(d)
1700 timer(d)
1699 fm.end()
1701 fm.end()
1700
1702
1701 @command(b'perffncachewrite', formatteropts)
1703 @command(b'perffncachewrite', formatteropts)
1702 def perffncachewrite(ui, repo, **opts):
1704 def perffncachewrite(ui, repo, **opts):
1703 opts = _byteskwargs(opts)
1705 opts = _byteskwargs(opts)
1704 timer, fm = gettimer(ui, opts)
1706 timer, fm = gettimer(ui, opts)
1705 s = repo.store
1707 s = repo.store
1706 lock = repo.lock()
1708 lock = repo.lock()
1707 s.fncache._load()
1709 s.fncache._load()
1708 tr = repo.transaction(b'perffncachewrite')
1710 tr = repo.transaction(b'perffncachewrite')
1709 tr.addbackup(b'fncache')
1711 tr.addbackup(b'fncache')
1710 def d():
1712 def d():
1711 s.fncache._dirty = True
1713 s.fncache._dirty = True
1712 s.fncache.write(tr)
1714 s.fncache.write(tr)
1713 timer(d)
1715 timer(d)
1714 tr.close()
1716 tr.close()
1715 lock.release()
1717 lock.release()
1716 fm.end()
1718 fm.end()
1717
1719
1718 @command(b'perffncacheencode', formatteropts)
1720 @command(b'perffncacheencode', formatteropts)
1719 def perffncacheencode(ui, repo, **opts):
1721 def perffncacheencode(ui, repo, **opts):
1720 opts = _byteskwargs(opts)
1722 opts = _byteskwargs(opts)
1721 timer, fm = gettimer(ui, opts)
1723 timer, fm = gettimer(ui, opts)
1722 s = repo.store
1724 s = repo.store
1723 s.fncache._load()
1725 s.fncache._load()
1724 def d():
1726 def d():
1725 for p in s.fncache.entries:
1727 for p in s.fncache.entries:
1726 s.encode(p)
1728 s.encode(p)
1727 timer(d)
1729 timer(d)
1728 fm.end()
1730 fm.end()
1729
1731
1730 def _bdiffworker(q, blocks, xdiff, ready, done):
1732 def _bdiffworker(q, blocks, xdiff, ready, done):
1731 while not done.is_set():
1733 while not done.is_set():
1732 pair = q.get()
1734 pair = q.get()
1733 while pair is not None:
1735 while pair is not None:
1734 if xdiff:
1736 if xdiff:
1735 mdiff.bdiff.xdiffblocks(*pair)
1737 mdiff.bdiff.xdiffblocks(*pair)
1736 elif blocks:
1738 elif blocks:
1737 mdiff.bdiff.blocks(*pair)
1739 mdiff.bdiff.blocks(*pair)
1738 else:
1740 else:
1739 mdiff.textdiff(*pair)
1741 mdiff.textdiff(*pair)
1740 q.task_done()
1742 q.task_done()
1741 pair = q.get()
1743 pair = q.get()
1742 q.task_done() # for the None one
1744 q.task_done() # for the None one
1743 with ready:
1745 with ready:
1744 ready.wait()
1746 ready.wait()
1745
1747
1746 def _manifestrevision(repo, mnode):
1748 def _manifestrevision(repo, mnode):
1747 ml = repo.manifestlog
1749 ml = repo.manifestlog
1748
1750
1749 if util.safehasattr(ml, b'getstorage'):
1751 if util.safehasattr(ml, b'getstorage'):
1750 store = ml.getstorage(b'')
1752 store = ml.getstorage(b'')
1751 else:
1753 else:
1752 store = ml._revlog
1754 store = ml._revlog
1753
1755
1754 return store.revision(mnode)
1756 return store.revision(mnode)
1755
1757
1756 @command(b'perfbdiff', revlogopts + formatteropts + [
1758 @command(b'perfbdiff', revlogopts + formatteropts + [
1757 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1759 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1758 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1760 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1759 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
1761 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
1760 (b'', b'blocks', False, b'test computing diffs into blocks'),
1762 (b'', b'blocks', False, b'test computing diffs into blocks'),
1761 (b'', b'xdiff', False, b'use xdiff algorithm'),
1763 (b'', b'xdiff', False, b'use xdiff algorithm'),
1762 ],
1764 ],
1763
1765
1764 b'-c|-m|FILE REV')
1766 b'-c|-m|FILE REV')
1765 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1767 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1766 """benchmark a bdiff between revisions
1768 """benchmark a bdiff between revisions
1767
1769
1768 By default, benchmark a bdiff between its delta parent and itself.
1770 By default, benchmark a bdiff between its delta parent and itself.
1769
1771
1770 With ``--count``, benchmark bdiffs between delta parents and self for N
1772 With ``--count``, benchmark bdiffs between delta parents and self for N
1771 revisions starting at the specified revision.
1773 revisions starting at the specified revision.
1772
1774
1773 With ``--alldata``, assume the requested revision is a changeset and
1775 With ``--alldata``, assume the requested revision is a changeset and
1774 measure bdiffs for all changes related to that changeset (manifest
1776 measure bdiffs for all changes related to that changeset (manifest
1775 and filelogs).
1777 and filelogs).
1776 """
1778 """
1777 opts = _byteskwargs(opts)
1779 opts = _byteskwargs(opts)
1778
1780
1779 if opts[b'xdiff'] and not opts[b'blocks']:
1781 if opts[b'xdiff'] and not opts[b'blocks']:
1780 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1782 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1781
1783
1782 if opts[b'alldata']:
1784 if opts[b'alldata']:
1783 opts[b'changelog'] = True
1785 opts[b'changelog'] = True
1784
1786
1785 if opts.get(b'changelog') or opts.get(b'manifest'):
1787 if opts.get(b'changelog') or opts.get(b'manifest'):
1786 file_, rev = None, file_
1788 file_, rev = None, file_
1787 elif rev is None:
1789 elif rev is None:
1788 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1790 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1789
1791
1790 blocks = opts[b'blocks']
1792 blocks = opts[b'blocks']
1791 xdiff = opts[b'xdiff']
1793 xdiff = opts[b'xdiff']
1792 textpairs = []
1794 textpairs = []
1793
1795
1794 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1796 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1795
1797
1796 startrev = r.rev(r.lookup(rev))
1798 startrev = r.rev(r.lookup(rev))
1797 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1799 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1798 if opts[b'alldata']:
1800 if opts[b'alldata']:
1799 # Load revisions associated with changeset.
1801 # Load revisions associated with changeset.
1800 ctx = repo[rev]
1802 ctx = repo[rev]
1801 mtext = _manifestrevision(repo, ctx.manifestnode())
1803 mtext = _manifestrevision(repo, ctx.manifestnode())
1802 for pctx in ctx.parents():
1804 for pctx in ctx.parents():
1803 pman = _manifestrevision(repo, pctx.manifestnode())
1805 pman = _manifestrevision(repo, pctx.manifestnode())
1804 textpairs.append((pman, mtext))
1806 textpairs.append((pman, mtext))
1805
1807
1806 # Load filelog revisions by iterating manifest delta.
1808 # Load filelog revisions by iterating manifest delta.
1807 man = ctx.manifest()
1809 man = ctx.manifest()
1808 pman = ctx.p1().manifest()
1810 pman = ctx.p1().manifest()
1809 for filename, change in pman.diff(man).items():
1811 for filename, change in pman.diff(man).items():
1810 fctx = repo.file(filename)
1812 fctx = repo.file(filename)
1811 f1 = fctx.revision(change[0][0] or -1)
1813 f1 = fctx.revision(change[0][0] or -1)
1812 f2 = fctx.revision(change[1][0] or -1)
1814 f2 = fctx.revision(change[1][0] or -1)
1813 textpairs.append((f1, f2))
1815 textpairs.append((f1, f2))
1814 else:
1816 else:
1815 dp = r.deltaparent(rev)
1817 dp = r.deltaparent(rev)
1816 textpairs.append((r.revision(dp), r.revision(rev)))
1818 textpairs.append((r.revision(dp), r.revision(rev)))
1817
1819
1818 withthreads = threads > 0
1820 withthreads = threads > 0
1819 if not withthreads:
1821 if not withthreads:
1820 def d():
1822 def d():
1821 for pair in textpairs:
1823 for pair in textpairs:
1822 if xdiff:
1824 if xdiff:
1823 mdiff.bdiff.xdiffblocks(*pair)
1825 mdiff.bdiff.xdiffblocks(*pair)
1824 elif blocks:
1826 elif blocks:
1825 mdiff.bdiff.blocks(*pair)
1827 mdiff.bdiff.blocks(*pair)
1826 else:
1828 else:
1827 mdiff.textdiff(*pair)
1829 mdiff.textdiff(*pair)
1828 else:
1830 else:
1829 q = queue()
1831 q = queue()
1830 for i in _xrange(threads):
1832 for i in _xrange(threads):
1831 q.put(None)
1833 q.put(None)
1832 ready = threading.Condition()
1834 ready = threading.Condition()
1833 done = threading.Event()
1835 done = threading.Event()
1834 for i in _xrange(threads):
1836 for i in _xrange(threads):
1835 threading.Thread(target=_bdiffworker,
1837 threading.Thread(target=_bdiffworker,
1836 args=(q, blocks, xdiff, ready, done)).start()
1838 args=(q, blocks, xdiff, ready, done)).start()
1837 q.join()
1839 q.join()
1838 def d():
1840 def d():
1839 for pair in textpairs:
1841 for pair in textpairs:
1840 q.put(pair)
1842 q.put(pair)
1841 for i in _xrange(threads):
1843 for i in _xrange(threads):
1842 q.put(None)
1844 q.put(None)
1843 with ready:
1845 with ready:
1844 ready.notify_all()
1846 ready.notify_all()
1845 q.join()
1847 q.join()
1846 timer, fm = gettimer(ui, opts)
1848 timer, fm = gettimer(ui, opts)
1847 timer(d)
1849 timer(d)
1848 fm.end()
1850 fm.end()
1849
1851
1850 if withthreads:
1852 if withthreads:
1851 done.set()
1853 done.set()
1852 for i in _xrange(threads):
1854 for i in _xrange(threads):
1853 q.put(None)
1855 q.put(None)
1854 with ready:
1856 with ready:
1855 ready.notify_all()
1857 ready.notify_all()
1856
1858
1857 @command(b'perfunidiff', revlogopts + formatteropts + [
1859 @command(b'perfunidiff', revlogopts + formatteropts + [
1858 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1860 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1859 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1861 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1860 ], b'-c|-m|FILE REV')
1862 ], b'-c|-m|FILE REV')
1861 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1863 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1862 """benchmark a unified diff between revisions
1864 """benchmark a unified diff between revisions
1863
1865
1864 This doesn't include any copy tracing - it's just a unified diff
1866 This doesn't include any copy tracing - it's just a unified diff
1865 of the texts.
1867 of the texts.
1866
1868
1867 By default, benchmark a diff between its delta parent and itself.
1869 By default, benchmark a diff between its delta parent and itself.
1868
1870
1869 With ``--count``, benchmark diffs between delta parents and self for N
1871 With ``--count``, benchmark diffs between delta parents and self for N
1870 revisions starting at the specified revision.
1872 revisions starting at the specified revision.
1871
1873
1872 With ``--alldata``, assume the requested revision is a changeset and
1874 With ``--alldata``, assume the requested revision is a changeset and
1873 measure diffs for all changes related to that changeset (manifest
1875 measure diffs for all changes related to that changeset (manifest
1874 and filelogs).
1876 and filelogs).
1875 """
1877 """
1876 opts = _byteskwargs(opts)
1878 opts = _byteskwargs(opts)
1877 if opts[b'alldata']:
1879 if opts[b'alldata']:
1878 opts[b'changelog'] = True
1880 opts[b'changelog'] = True
1879
1881
1880 if opts.get(b'changelog') or opts.get(b'manifest'):
1882 if opts.get(b'changelog') or opts.get(b'manifest'):
1881 file_, rev = None, file_
1883 file_, rev = None, file_
1882 elif rev is None:
1884 elif rev is None:
1883 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1885 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1884
1886
1885 textpairs = []
1887 textpairs = []
1886
1888
1887 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1889 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1888
1890
1889 startrev = r.rev(r.lookup(rev))
1891 startrev = r.rev(r.lookup(rev))
1890 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1892 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1891 if opts[b'alldata']:
1893 if opts[b'alldata']:
1892 # Load revisions associated with changeset.
1894 # Load revisions associated with changeset.
1893 ctx = repo[rev]
1895 ctx = repo[rev]
1894 mtext = _manifestrevision(repo, ctx.manifestnode())
1896 mtext = _manifestrevision(repo, ctx.manifestnode())
1895 for pctx in ctx.parents():
1897 for pctx in ctx.parents():
1896 pman = _manifestrevision(repo, pctx.manifestnode())
1898 pman = _manifestrevision(repo, pctx.manifestnode())
1897 textpairs.append((pman, mtext))
1899 textpairs.append((pman, mtext))
1898
1900
1899 # Load filelog revisions by iterating manifest delta.
1901 # Load filelog revisions by iterating manifest delta.
1900 man = ctx.manifest()
1902 man = ctx.manifest()
1901 pman = ctx.p1().manifest()
1903 pman = ctx.p1().manifest()
1902 for filename, change in pman.diff(man).items():
1904 for filename, change in pman.diff(man).items():
1903 fctx = repo.file(filename)
1905 fctx = repo.file(filename)
1904 f1 = fctx.revision(change[0][0] or -1)
1906 f1 = fctx.revision(change[0][0] or -1)
1905 f2 = fctx.revision(change[1][0] or -1)
1907 f2 = fctx.revision(change[1][0] or -1)
1906 textpairs.append((f1, f2))
1908 textpairs.append((f1, f2))
1907 else:
1909 else:
1908 dp = r.deltaparent(rev)
1910 dp = r.deltaparent(rev)
1909 textpairs.append((r.revision(dp), r.revision(rev)))
1911 textpairs.append((r.revision(dp), r.revision(rev)))
1910
1912
1911 def d():
1913 def d():
1912 for left, right in textpairs:
1914 for left, right in textpairs:
1913 # The date strings don't matter, so we pass empty strings.
1915 # The date strings don't matter, so we pass empty strings.
1914 headerlines, hunks = mdiff.unidiff(
1916 headerlines, hunks = mdiff.unidiff(
1915 left, b'', right, b'', b'left', b'right', binary=False)
1917 left, b'', right, b'', b'left', b'right', binary=False)
1916 # consume iterators in roughly the way patch.py does
1918 # consume iterators in roughly the way patch.py does
1917 b'\n'.join(headerlines)
1919 b'\n'.join(headerlines)
1918 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1920 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1919 timer, fm = gettimer(ui, opts)
1921 timer, fm = gettimer(ui, opts)
1920 timer(d)
1922 timer(d)
1921 fm.end()
1923 fm.end()
1922
1924
1923 @command(b'perfdiffwd', formatteropts)
1925 @command(b'perfdiffwd', formatteropts)
1924 def perfdiffwd(ui, repo, **opts):
1926 def perfdiffwd(ui, repo, **opts):
1925 """Profile diff of working directory changes"""
1927 """Profile diff of working directory changes"""
1926 opts = _byteskwargs(opts)
1928 opts = _byteskwargs(opts)
1927 timer, fm = gettimer(ui, opts)
1929 timer, fm = gettimer(ui, opts)
1928 options = {
1930 options = {
1929 'w': 'ignore_all_space',
1931 'w': 'ignore_all_space',
1930 'b': 'ignore_space_change',
1932 'b': 'ignore_space_change',
1931 'B': 'ignore_blank_lines',
1933 'B': 'ignore_blank_lines',
1932 }
1934 }
1933
1935
1934 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1936 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1935 opts = dict((options[c], b'1') for c in diffopt)
1937 opts = dict((options[c], b'1') for c in diffopt)
1936 def d():
1938 def d():
1937 ui.pushbuffer()
1939 ui.pushbuffer()
1938 commands.diff(ui, repo, **opts)
1940 commands.diff(ui, repo, **opts)
1939 ui.popbuffer()
1941 ui.popbuffer()
1940 diffopt = diffopt.encode('ascii')
1942 diffopt = diffopt.encode('ascii')
1941 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1943 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1942 timer(d, title=title)
1944 timer(d, title=title)
1943 fm.end()
1945 fm.end()
1944
1946
1945 @command(b'perfrevlogindex', revlogopts + formatteropts,
1947 @command(b'perfrevlogindex', revlogopts + formatteropts,
1946 b'-c|-m|FILE')
1948 b'-c|-m|FILE')
1947 def perfrevlogindex(ui, repo, file_=None, **opts):
1949 def perfrevlogindex(ui, repo, file_=None, **opts):
1948 """Benchmark operations against a revlog index.
1950 """Benchmark operations against a revlog index.
1949
1951
1950 This tests constructing a revlog instance, reading index data,
1952 This tests constructing a revlog instance, reading index data,
1951 parsing index data, and performing various operations related to
1953 parsing index data, and performing various operations related to
1952 index data.
1954 index data.
1953 """
1955 """
1954
1956
1955 opts = _byteskwargs(opts)
1957 opts = _byteskwargs(opts)
1956
1958
1957 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1959 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1958
1960
1959 opener = getattr(rl, 'opener') # trick linter
1961 opener = getattr(rl, 'opener') # trick linter
1960 indexfile = rl.indexfile
1962 indexfile = rl.indexfile
1961 data = opener.read(indexfile)
1963 data = opener.read(indexfile)
1962
1964
1963 header = struct.unpack(b'>I', data[0:4])[0]
1965 header = struct.unpack(b'>I', data[0:4])[0]
1964 version = header & 0xFFFF
1966 version = header & 0xFFFF
1965 if version == 1:
1967 if version == 1:
1966 revlogio = revlog.revlogio()
1968 revlogio = revlog.revlogio()
1967 inline = header & (1 << 16)
1969 inline = header & (1 << 16)
1968 else:
1970 else:
1969 raise error.Abort((b'unsupported revlog version: %d') % version)
1971 raise error.Abort((b'unsupported revlog version: %d') % version)
1970
1972
1971 rllen = len(rl)
1973 rllen = len(rl)
1972
1974
1973 node0 = rl.node(0)
1975 node0 = rl.node(0)
1974 node25 = rl.node(rllen // 4)
1976 node25 = rl.node(rllen // 4)
1975 node50 = rl.node(rllen // 2)
1977 node50 = rl.node(rllen // 2)
1976 node75 = rl.node(rllen // 4 * 3)
1978 node75 = rl.node(rllen // 4 * 3)
1977 node100 = rl.node(rllen - 1)
1979 node100 = rl.node(rllen - 1)
1978
1980
1979 allrevs = range(rllen)
1981 allrevs = range(rllen)
1980 allrevsrev = list(reversed(allrevs))
1982 allrevsrev = list(reversed(allrevs))
1981 allnodes = [rl.node(rev) for rev in range(rllen)]
1983 allnodes = [rl.node(rev) for rev in range(rllen)]
1982 allnodesrev = list(reversed(allnodes))
1984 allnodesrev = list(reversed(allnodes))
1983
1985
1984 def constructor():
1986 def constructor():
1985 revlog.revlog(opener, indexfile)
1987 revlog.revlog(opener, indexfile)
1986
1988
1987 def read():
1989 def read():
1988 with opener(indexfile) as fh:
1990 with opener(indexfile) as fh:
1989 fh.read()
1991 fh.read()
1990
1992
1991 def parseindex():
1993 def parseindex():
1992 revlogio.parseindex(data, inline)
1994 revlogio.parseindex(data, inline)
1993
1995
1994 def getentry(revornode):
1996 def getentry(revornode):
1995 index = revlogio.parseindex(data, inline)[0]
1997 index = revlogio.parseindex(data, inline)[0]
1996 index[revornode]
1998 index[revornode]
1997
1999
1998 def getentries(revs, count=1):
2000 def getentries(revs, count=1):
1999 index = revlogio.parseindex(data, inline)[0]
2001 index = revlogio.parseindex(data, inline)[0]
2000
2002
2001 for i in range(count):
2003 for i in range(count):
2002 for rev in revs:
2004 for rev in revs:
2003 index[rev]
2005 index[rev]
2004
2006
2005 def resolvenode(node):
2007 def resolvenode(node):
2006 nodemap = revlogio.parseindex(data, inline)[1]
2008 nodemap = revlogio.parseindex(data, inline)[1]
2007 # This only works for the C code.
2009 # This only works for the C code.
2008 if nodemap is None:
2010 if nodemap is None:
2009 return
2011 return
2010
2012
2011 try:
2013 try:
2012 nodemap[node]
2014 nodemap[node]
2013 except error.RevlogError:
2015 except error.RevlogError:
2014 pass
2016 pass
2015
2017
2016 def resolvenodes(nodes, count=1):
2018 def resolvenodes(nodes, count=1):
2017 nodemap = revlogio.parseindex(data, inline)[1]
2019 nodemap = revlogio.parseindex(data, inline)[1]
2018 if nodemap is None:
2020 if nodemap is None:
2019 return
2021 return
2020
2022
2021 for i in range(count):
2023 for i in range(count):
2022 for node in nodes:
2024 for node in nodes:
2023 try:
2025 try:
2024 nodemap[node]
2026 nodemap[node]
2025 except error.RevlogError:
2027 except error.RevlogError:
2026 pass
2028 pass
2027
2029
2028 benches = [
2030 benches = [
2029 (constructor, b'revlog constructor'),
2031 (constructor, b'revlog constructor'),
2030 (read, b'read'),
2032 (read, b'read'),
2031 (parseindex, b'create index object'),
2033 (parseindex, b'create index object'),
2032 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2034 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2033 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2035 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2034 (lambda: resolvenode(node0), b'look up node at rev 0'),
2036 (lambda: resolvenode(node0), b'look up node at rev 0'),
2035 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2037 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2036 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2038 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2037 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2039 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2038 (lambda: resolvenode(node100), b'look up node at tip'),
2040 (lambda: resolvenode(node100), b'look up node at tip'),
2039 # 2x variation is to measure caching impact.
2041 # 2x variation is to measure caching impact.
2040 (lambda: resolvenodes(allnodes),
2042 (lambda: resolvenodes(allnodes),
2041 b'look up all nodes (forward)'),
2043 b'look up all nodes (forward)'),
2042 (lambda: resolvenodes(allnodes, 2),
2044 (lambda: resolvenodes(allnodes, 2),
2043 b'look up all nodes 2x (forward)'),
2045 b'look up all nodes 2x (forward)'),
2044 (lambda: resolvenodes(allnodesrev),
2046 (lambda: resolvenodes(allnodesrev),
2045 b'look up all nodes (reverse)'),
2047 b'look up all nodes (reverse)'),
2046 (lambda: resolvenodes(allnodesrev, 2),
2048 (lambda: resolvenodes(allnodesrev, 2),
2047 b'look up all nodes 2x (reverse)'),
2049 b'look up all nodes 2x (reverse)'),
2048 (lambda: getentries(allrevs),
2050 (lambda: getentries(allrevs),
2049 b'retrieve all index entries (forward)'),
2051 b'retrieve all index entries (forward)'),
2050 (lambda: getentries(allrevs, 2),
2052 (lambda: getentries(allrevs, 2),
2051 b'retrieve all index entries 2x (forward)'),
2053 b'retrieve all index entries 2x (forward)'),
2052 (lambda: getentries(allrevsrev),
2054 (lambda: getentries(allrevsrev),
2053 b'retrieve all index entries (reverse)'),
2055 b'retrieve all index entries (reverse)'),
2054 (lambda: getentries(allrevsrev, 2),
2056 (lambda: getentries(allrevsrev, 2),
2055 b'retrieve all index entries 2x (reverse)'),
2057 b'retrieve all index entries 2x (reverse)'),
2056 ]
2058 ]
2057
2059
2058 for fn, title in benches:
2060 for fn, title in benches:
2059 timer, fm = gettimer(ui, opts)
2061 timer, fm = gettimer(ui, opts)
2060 timer(fn, title=title)
2062 timer(fn, title=title)
2061 fm.end()
2063 fm.end()
2062
2064
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    Every ``-d/--dist`` revision between revision 0 and the tip of the
    specified revlog is read. Use ``-s/--startrev`` to pick a different
    first revision and ``--reverse`` to walk from the tip backwards.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # A negative start revision counts from the end, list-index style.
    if startrev < 0:
        startrev += rllen

    def d():
        rl.clearcaches()

        step = opts[b'dist']
        if reverse:
            first, stop = rllen - 1, startrev - 1
            step = -step
        else:
            first, stop = startrev, rllen

        for rev in _xrange(first, stop, step):
            # Old revlogs don't support passing an int to revision().
            rl.revision(rl.node(rev))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2104
2106
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
         ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # Negative revisions count from the end of the revlog.
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # fix: error message previously read 'invalide run count'
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    # Each pass yields (rev, timing) in the same order; zip them into
    # (rev, [timing-per-pass]) entries.
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # BUG FIX: this row previously computed the 70th-percentile index
        # (resultcount * 70 // 100) while being labeled "50%".
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
2222
2224
2223 class _faketr(object):
2225 class _faketr(object):
2224 def add(s, x, y, z=None):
2226 def add(s, x, y, z=None):
2225 return None
2227 return None
2226
2228
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    """Rewrite revisions ``startrev``..``stoprev`` of ``orig`` into a
    temporary revlog, timing each individual ``addrawrevision`` call.

    ``source`` selects how the revision data is fed (see
    ``_getrevisionseed``). Returns a list of ``(rev, timing)`` pairs,
    where ``timing`` is the value captured by ``timeone`` for that
    single write.
    """
    timings = []
    # Writes don't need a real transaction; use the no-op stub.
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            def updateprogress(pos):
                progress.update(pos)
            def completeprogress():
                progress.complete()
        else:
            # Older Mercurial: module-level ui.progress() calls.
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            # Only the addrawrevision call itself is timed; seed
            # preparation and cache clearing stay outside the timer.
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
2263
2265
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``(args, kwargs)`` pair to feed ``addrawrevision`` for
    revision ``rev`` of ``orig``.

    ``source`` picks the data form: a full text (``full``), a cached
    delta against a chosen parent (``parent-1``/``parent-2``/
    ``parent-smallest``), or the delta already stored in ``orig``
    (``storage``).
    """
    from mercurial.node import nullid

    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    text = None
    cachedelta = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # Fall back to the first parent when there is no second one.
        base = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(base), orig.revdiff(base, rev))
    elif source == b'parent-smallest':
        base, diff = p1, orig.revdiff(p1, rev)
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(diff) > len(p2diff):
                base, diff = p2, p2diff
        cachedelta = (orig.rev(base), diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, orig.linkrev(rev), p1, p2),
            {'node': node, 'flags': orig.flags(rev), 'cachedelta': cachedelta})
2302
2304
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Context manager yielding a writable temporary copy of ``orig``
    truncated to ``truncaterev`` revisions.

    The index and data files are copied into a fresh temporary
    directory, cut back so that revisions >= ``truncaterev`` can be
    re-added, and opened as a new revlog. The temporary directory is
    removed on exit. Inline revlogs are rejected.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    # Forward 'upperboundcomp' only on Mercurial versions whose revlog
    # has it, so this extension keeps working on older releases.
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        # Index entries are fixed-size records, so the cut point is
        # revision-count * record-size.
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname, **revlogkwargs)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
2353
2355
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # No explicit selection: benchmark every engine that is both
        # available and actually implements revlog compression.
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # Open a raw file handle on the file holding the chunk data
        # (the index file for inline revlogs, the data file otherwise).
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        # One segment read per revision, letting the revlog open/close
        # file handles itself.
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # Same as doread(), but reusing a single file handle.
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # One segment read covering the whole revision range.
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        # Batch read with a reused file handle.
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        # Read and decompress each chunk individually.
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        # Recompress the chunks captured by dochunkbatch() with the
        # given engine; the revlog's own compressor is restored even on
        # error.
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    # NOTE: dochunkbatch must run before any docompress entry, since
    # docompress consumes the chunks it stores; the list below keeps
    # that ordering.
    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2471
2473
2472 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2474 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2473 [(b'', b'cache', False, b'use caches instead of clearing')],
2475 [(b'', b'cache', False, b'use caches instead of clearing')],
2474 b'-c|-m|FILE REV')
2476 b'-c|-m|FILE REV')
2475 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2477 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2476 """Benchmark obtaining a revlog revision.
2478 """Benchmark obtaining a revlog revision.
2477
2479
2478 Obtaining a revlog revision consists of roughly the following steps:
2480 Obtaining a revlog revision consists of roughly the following steps:
2479
2481
2480 1. Compute the delta chain
2482 1. Compute the delta chain
2481 2. Slice the delta chain if applicable
2483 2. Slice the delta chain if applicable
2482 3. Obtain the raw chunks for that delta chain
2484 3. Obtain the raw chunks for that delta chain
2483 4. Decompress each raw chunk
2485 4. Decompress each raw chunk
2484 5. Apply binary patches to obtain fulltext
2486 5. Apply binary patches to obtain fulltext
2485 6. Verify hash of fulltext
2487 6. Verify hash of fulltext
2486
2488
2487 This command measures the time spent in each of these phases.
2489 This command measures the time spent in each of these phases.
2488 """
2490 """
2489 opts = _byteskwargs(opts)
2491 opts = _byteskwargs(opts)
2490
2492
2491 if opts.get(b'changelog') or opts.get(b'manifest'):
2493 if opts.get(b'changelog') or opts.get(b'manifest'):
2492 file_, rev = None, file_
2494 file_, rev = None, file_
2493 elif rev is None:
2495 elif rev is None:
2494 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2496 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2495
2497
2496 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2498 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2497
2499
2498 # _chunkraw was renamed to _getsegmentforrevs.
2500 # _chunkraw was renamed to _getsegmentforrevs.
2499 try:
2501 try:
2500 segmentforrevs = r._getsegmentforrevs
2502 segmentforrevs = r._getsegmentforrevs
2501 except AttributeError:
2503 except AttributeError:
2502 segmentforrevs = r._chunkraw
2504 segmentforrevs = r._chunkraw
2503
2505
2504 node = r.lookup(rev)
2506 node = r.lookup(rev)
2505 rev = r.rev(node)
2507 rev = r.rev(node)
2506
2508
2507 def getrawchunks(data, chain):
2509 def getrawchunks(data, chain):
2508 start = r.start
2510 start = r.start
2509 length = r.length
2511 length = r.length
2510 inline = r._inline
2512 inline = r._inline
2511 iosize = r._io.size
2513 iosize = r._io.size
2512 buffer = util.buffer
2514 buffer = util.buffer
2513
2515
2514 chunks = []
2516 chunks = []
2515 ladd = chunks.append
2517 ladd = chunks.append
2516 for idx, item in enumerate(chain):
2518 for idx, item in enumerate(chain):
2517 offset = start(item[0])
2519 offset = start(item[0])
2518 bits = data[idx]
2520 bits = data[idx]
2519 for rev in item:
2521 for rev in item:
2520 chunkstart = start(rev)
2522 chunkstart = start(rev)
2521 if inline:
2523 if inline:
2522 chunkstart += (rev + 1) * iosize
2524 chunkstart += (rev + 1) * iosize
2523 chunklength = length(rev)
2525 chunklength = length(rev)
2524 ladd(buffer(bits, chunkstart - offset, chunklength))
2526 ladd(buffer(bits, chunkstart - offset, chunklength))
2525
2527
2526 return chunks
2528 return chunks
2527
2529
2528 def dodeltachain(rev):
2530 def dodeltachain(rev):
2529 if not cache:
2531 if not cache:
2530 r.clearcaches()
2532 r.clearcaches()
2531 r._deltachain(rev)
2533 r._deltachain(rev)
2532
2534
2533 def doread(chain):
2535 def doread(chain):
2534 if not cache:
2536 if not cache:
2535 r.clearcaches()
2537 r.clearcaches()
2536 for item in slicedchain:
2538 for item in slicedchain:
2537 segmentforrevs(item[0], item[-1])
2539 segmentforrevs(item[0], item[-1])
2538
2540
2539 def doslice(r, chain, size):
2541 def doslice(r, chain, size):
2540 for s in slicechunk(r, chain, targetsize=size):
2542 for s in slicechunk(r, chain, targetsize=size):
2541 pass
2543 pass
2542
2544
2543 def dorawchunks(data, chain):
2545 def dorawchunks(data, chain):
2544 if not cache:
2546 if not cache:
2545 r.clearcaches()
2547 r.clearcaches()
2546 getrawchunks(data, chain)
2548 getrawchunks(data, chain)
2547
2549
2548 def dodecompress(chunks):
2550 def dodecompress(chunks):
2549 decomp = r.decompress
2551 decomp = r.decompress
2550 for chunk in chunks:
2552 for chunk in chunks:
2551 decomp(chunk)
2553 decomp(chunk)
2552
2554
2553 def dopatch(text, bins):
2555 def dopatch(text, bins):
2554 if not cache:
2556 if not cache:
2555 r.clearcaches()
2557 r.clearcaches()
2556 mdiff.patches(text, bins)
2558 mdiff.patches(text, bins)
2557
2559
2558 def dohash(text):
2560 def dohash(text):
2559 if not cache:
2561 if not cache:
2560 r.clearcaches()
2562 r.clearcaches()
2561 r.checkhash(text, node, rev=rev)
2563 r.checkhash(text, node, rev=rev)
2562
2564
2563 def dorevision():
2565 def dorevision():
2564 if not cache:
2566 if not cache:
2565 r.clearcaches()
2567 r.clearcaches()
2566 r.revision(node)
2568 r.revision(node)
2567
2569
2568 try:
2570 try:
2569 from mercurial.revlogutils.deltas import slicechunk
2571 from mercurial.revlogutils.deltas import slicechunk
2570 except ImportError:
2572 except ImportError:
2571 slicechunk = getattr(revlog, '_slicechunk', None)
2573 slicechunk = getattr(revlog, '_slicechunk', None)
2572
2574
2573 size = r.length(rev)
2575 size = r.length(rev)
2574 chain = r._deltachain(rev)[0]
2576 chain = r._deltachain(rev)[0]
2575 if not getattr(r, '_withsparseread', False):
2577 if not getattr(r, '_withsparseread', False):
2576 slicedchain = (chain,)
2578 slicedchain = (chain,)
2577 else:
2579 else:
2578 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2580 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2579 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2581 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2580 rawchunks = getrawchunks(data, slicedchain)
2582 rawchunks = getrawchunks(data, slicedchain)
2581 bins = r._chunks(chain)
2583 bins = r._chunks(chain)
2582 text = bytes(bins[0])
2584 text = bytes(bins[0])
2583 bins = bins[1:]
2585 bins = bins[1:]
2584 text = mdiff.patches(text, bins)
2586 text = mdiff.patches(text, bins)
2585
2587
2586 benches = [
2588 benches = [
2587 (lambda: dorevision(), b'full'),
2589 (lambda: dorevision(), b'full'),
2588 (lambda: dodeltachain(rev), b'deltachain'),
2590 (lambda: dodeltachain(rev), b'deltachain'),
2589 (lambda: doread(chain), b'read'),
2591 (lambda: doread(chain), b'read'),
2590 ]
2592 ]
2591
2593
2592 if getattr(r, '_withsparseread', False):
2594 if getattr(r, '_withsparseread', False):
2593 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2595 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2594 benches.append(slicing)
2596 benches.append(slicing)
2595
2597
2596 benches.extend([
2598 benches.extend([
2597 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2599 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2598 (lambda: dodecompress(rawchunks), b'decompress'),
2600 (lambda: dodecompress(rawchunks), b'decompress'),
2599 (lambda: dopatch(text, bins), b'patch'),
2601 (lambda: dopatch(text, bins), b'patch'),
2600 (lambda: dohash(text), b'hash'),
2602 (lambda: dohash(text), b'hash'),
2601 ])
2603 ])
2602
2604
2603 timer, fm = gettimer(ui, opts)
2605 timer, fm = gettimer(ui, opts)
2604 for fn, title in benches:
2606 for fn, title in benches:
2605 timer(fn, title=title)
2607 timer(fn, title=title)
2606 fm.end()
2608 fm.end()
2607
2609
2608 @command(b'perfrevset',
2610 @command(b'perfrevset',
2609 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2611 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2610 (b'', b'contexts', False, b'obtain changectx for each revision')]
2612 (b'', b'contexts', False, b'obtain changectx for each revision')]
2611 + formatteropts, b"REVSET")
2613 + formatteropts, b"REVSET")
2612 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2614 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2613 """benchmark the execution time of a revset
2615 """benchmark the execution time of a revset
2614
2616
2615 Use the --clean option if need to evaluate the impact of build volatile
2617 Use the --clean option if need to evaluate the impact of build volatile
2616 revisions set cache on the revset execution. Volatile cache hold filtered
2618 revisions set cache on the revset execution. Volatile cache hold filtered
2617 and obsolete related cache."""
2619 and obsolete related cache."""
2618 opts = _byteskwargs(opts)
2620 opts = _byteskwargs(opts)
2619
2621
2620 timer, fm = gettimer(ui, opts)
2622 timer, fm = gettimer(ui, opts)
2621 def d():
2623 def d():
2622 if clear:
2624 if clear:
2623 repo.invalidatevolatilesets()
2625 repo.invalidatevolatilesets()
2624 if contexts:
2626 if contexts:
2625 for ctx in repo.set(expr): pass
2627 for ctx in repo.set(expr): pass
2626 else:
2628 else:
2627 for r in repo.revs(expr): pass
2629 for r in repo.revs(expr): pass
2628 timer(d)
2630 timer(d)
2629 fm.end()
2631 fm.end()
2630
2632
2631 @command(b'perfvolatilesets',
2633 @command(b'perfvolatilesets',
2632 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2634 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2633 ] + formatteropts)
2635 ] + formatteropts)
2634 def perfvolatilesets(ui, repo, *names, **opts):
2636 def perfvolatilesets(ui, repo, *names, **opts):
2635 """benchmark the computation of various volatile set
2637 """benchmark the computation of various volatile set
2636
2638
2637 Volatile set computes element related to filtering and obsolescence."""
2639 Volatile set computes element related to filtering and obsolescence."""
2638 opts = _byteskwargs(opts)
2640 opts = _byteskwargs(opts)
2639 timer, fm = gettimer(ui, opts)
2641 timer, fm = gettimer(ui, opts)
2640 repo = repo.unfiltered()
2642 repo = repo.unfiltered()
2641
2643
2642 def getobs(name):
2644 def getobs(name):
2643 def d():
2645 def d():
2644 repo.invalidatevolatilesets()
2646 repo.invalidatevolatilesets()
2645 if opts[b'clear_obsstore']:
2647 if opts[b'clear_obsstore']:
2646 clearfilecache(repo, b'obsstore')
2648 clearfilecache(repo, b'obsstore')
2647 obsolete.getrevs(repo, name)
2649 obsolete.getrevs(repo, name)
2648 return d
2650 return d
2649
2651
2650 allobs = sorted(obsolete.cachefuncs)
2652 allobs = sorted(obsolete.cachefuncs)
2651 if names:
2653 if names:
2652 allobs = [n for n in allobs if n in names]
2654 allobs = [n for n in allobs if n in names]
2653
2655
2654 for name in allobs:
2656 for name in allobs:
2655 timer(getobs(name), title=name)
2657 timer(getobs(name), title=name)
2656
2658
2657 def getfiltered(name):
2659 def getfiltered(name):
2658 def d():
2660 def d():
2659 repo.invalidatevolatilesets()
2661 repo.invalidatevolatilesets()
2660 if opts[b'clear_obsstore']:
2662 if opts[b'clear_obsstore']:
2661 clearfilecache(repo, b'obsstore')
2663 clearfilecache(repo, b'obsstore')
2662 repoview.filterrevs(repo, name)
2664 repoview.filterrevs(repo, name)
2663 return d
2665 return d
2664
2666
2665 allfilter = sorted(repoview.filtertable)
2667 allfilter = sorted(repoview.filtertable)
2666 if names:
2668 if names:
2667 allfilter = [n for n in allfilter if n in names]
2669 allfilter = [n for n in allfilter if n in names]
2668
2670
2669 for name in allfilter:
2671 for name in allfilter:
2670 timer(getfiltered(name), title=name)
2672 timer(getfiltered(name), title=name)
2671 fm.end()
2673 fm.end()
2672
2674
2673 @command(b'perfbranchmap',
2675 @command(b'perfbranchmap',
2674 [(b'f', b'full', False,
2676 [(b'f', b'full', False,
2675 b'Includes build time of subset'),
2677 b'Includes build time of subset'),
2676 (b'', b'clear-revbranch', False,
2678 (b'', b'clear-revbranch', False,
2677 b'purge the revbranch cache between computation'),
2679 b'purge the revbranch cache between computation'),
2678 ] + formatteropts)
2680 ] + formatteropts)
2679 def perfbranchmap(ui, repo, *filternames, **opts):
2681 def perfbranchmap(ui, repo, *filternames, **opts):
2680 """benchmark the update of a branchmap
2682 """benchmark the update of a branchmap
2681
2683
2682 This benchmarks the full repo.branchmap() call with read and write disabled
2684 This benchmarks the full repo.branchmap() call with read and write disabled
2683 """
2685 """
2684 opts = _byteskwargs(opts)
2686 opts = _byteskwargs(opts)
2685 full = opts.get(b"full", False)
2687 full = opts.get(b"full", False)
2686 clear_revbranch = opts.get(b"clear_revbranch", False)
2688 clear_revbranch = opts.get(b"clear_revbranch", False)
2687 timer, fm = gettimer(ui, opts)
2689 timer, fm = gettimer(ui, opts)
2688 def getbranchmap(filtername):
2690 def getbranchmap(filtername):
2689 """generate a benchmark function for the filtername"""
2691 """generate a benchmark function for the filtername"""
2690 if filtername is None:
2692 if filtername is None:
2691 view = repo
2693 view = repo
2692 else:
2694 else:
2693 view = repo.filtered(filtername)
2695 view = repo.filtered(filtername)
2694 if util.safehasattr(view._branchcaches, '_per_filter'):
2696 if util.safehasattr(view._branchcaches, '_per_filter'):
2695 filtered = view._branchcaches._per_filter
2697 filtered = view._branchcaches._per_filter
2696 else:
2698 else:
2697 # older versions
2699 # older versions
2698 filtered = view._branchcaches
2700 filtered = view._branchcaches
2699 def d():
2701 def d():
2700 if clear_revbranch:
2702 if clear_revbranch:
2701 repo.revbranchcache()._clear()
2703 repo.revbranchcache()._clear()
2702 if full:
2704 if full:
2703 view._branchcaches.clear()
2705 view._branchcaches.clear()
2704 else:
2706 else:
2705 filtered.pop(filtername, None)
2707 filtered.pop(filtername, None)
2706 view.branchmap()
2708 view.branchmap()
2707 return d
2709 return d
2708 # add filter in smaller subset to bigger subset
2710 # add filter in smaller subset to bigger subset
2709 possiblefilters = set(repoview.filtertable)
2711 possiblefilters = set(repoview.filtertable)
2710 if filternames:
2712 if filternames:
2711 possiblefilters &= set(filternames)
2713 possiblefilters &= set(filternames)
2712 subsettable = getbranchmapsubsettable()
2714 subsettable = getbranchmapsubsettable()
2713 allfilters = []
2715 allfilters = []
2714 while possiblefilters:
2716 while possiblefilters:
2715 for name in possiblefilters:
2717 for name in possiblefilters:
2716 subset = subsettable.get(name)
2718 subset = subsettable.get(name)
2717 if subset not in possiblefilters:
2719 if subset not in possiblefilters:
2718 break
2720 break
2719 else:
2721 else:
2720 assert False, b'subset cycle %s!' % possiblefilters
2722 assert False, b'subset cycle %s!' % possiblefilters
2721 allfilters.append(name)
2723 allfilters.append(name)
2722 possiblefilters.remove(name)
2724 possiblefilters.remove(name)
2723
2725
2724 # warm the cache
2726 # warm the cache
2725 if not full:
2727 if not full:
2726 for name in allfilters:
2728 for name in allfilters:
2727 repo.filtered(name).branchmap()
2729 repo.filtered(name).branchmap()
2728 if not filternames or b'unfiltered' in filternames:
2730 if not filternames or b'unfiltered' in filternames:
2729 # add unfiltered
2731 # add unfiltered
2730 allfilters.append(None)
2732 allfilters.append(None)
2731
2733
2732 if util.safehasattr(branchmap.branchcache, 'fromfile'):
2734 if util.safehasattr(branchmap.branchcache, 'fromfile'):
2733 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
2735 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
2734 branchcacheread.set(classmethod(lambda *args: None))
2736 branchcacheread.set(classmethod(lambda *args: None))
2735 else:
2737 else:
2736 # older versions
2738 # older versions
2737 branchcacheread = safeattrsetter(branchmap, b'read')
2739 branchcacheread = safeattrsetter(branchmap, b'read')
2738 branchcacheread.set(lambda *args: None)
2740 branchcacheread.set(lambda *args: None)
2739 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2741 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2740 branchcachewrite.set(lambda *args: None)
2742 branchcachewrite.set(lambda *args: None)
2741 try:
2743 try:
2742 for name in allfilters:
2744 for name in allfilters:
2743 printname = name
2745 printname = name
2744 if name is None:
2746 if name is None:
2745 printname = b'unfiltered'
2747 printname = b'unfiltered'
2746 timer(getbranchmap(name), title=str(printname))
2748 timer(getbranchmap(name), title=str(printname))
2747 finally:
2749 finally:
2748 branchcacheread.restore()
2750 branchcacheread.restore()
2749 branchcachewrite.restore()
2751 branchcachewrite.restore()
2750 fm.end()
2752 fm.end()
2751
2753
2752 @command(b'perfbranchmapupdate', [
2754 @command(b'perfbranchmapupdate', [
2753 (b'', b'base', [], b'subset of revision to start from'),
2755 (b'', b'base', [], b'subset of revision to start from'),
2754 (b'', b'target', [], b'subset of revision to end with'),
2756 (b'', b'target', [], b'subset of revision to end with'),
2755 (b'', b'clear-caches', False, b'clear cache between each runs')
2757 (b'', b'clear-caches', False, b'clear cache between each runs')
2756 ] + formatteropts)
2758 ] + formatteropts)
2757 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2759 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2758 """benchmark branchmap update from for <base> revs to <target> revs
2760 """benchmark branchmap update from for <base> revs to <target> revs
2759
2761
2760 If `--clear-caches` is passed, the following items will be reset before
2762 If `--clear-caches` is passed, the following items will be reset before
2761 each update:
2763 each update:
2762 * the changelog instance and associated indexes
2764 * the changelog instance and associated indexes
2763 * the rev-branch-cache instance
2765 * the rev-branch-cache instance
2764
2766
2765 Examples:
2767 Examples:
2766
2768
2767 # update for the one last revision
2769 # update for the one last revision
2768 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2770 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2769
2771
2770 $ update for change coming with a new branch
2772 $ update for change coming with a new branch
2771 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2773 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2772 """
2774 """
2773 from mercurial import branchmap
2775 from mercurial import branchmap
2774 from mercurial import repoview
2776 from mercurial import repoview
2775 opts = _byteskwargs(opts)
2777 opts = _byteskwargs(opts)
2776 timer, fm = gettimer(ui, opts)
2778 timer, fm = gettimer(ui, opts)
2777 clearcaches = opts[b'clear_caches']
2779 clearcaches = opts[b'clear_caches']
2778 unfi = repo.unfiltered()
2780 unfi = repo.unfiltered()
2779 x = [None] # used to pass data between closure
2781 x = [None] # used to pass data between closure
2780
2782
2781 # we use a `list` here to avoid possible side effect from smartset
2783 # we use a `list` here to avoid possible side effect from smartset
2782 baserevs = list(scmutil.revrange(repo, base))
2784 baserevs = list(scmutil.revrange(repo, base))
2783 targetrevs = list(scmutil.revrange(repo, target))
2785 targetrevs = list(scmutil.revrange(repo, target))
2784 if not baserevs:
2786 if not baserevs:
2785 raise error.Abort(b'no revisions selected for --base')
2787 raise error.Abort(b'no revisions selected for --base')
2786 if not targetrevs:
2788 if not targetrevs:
2787 raise error.Abort(b'no revisions selected for --target')
2789 raise error.Abort(b'no revisions selected for --target')
2788
2790
2789 # make sure the target branchmap also contains the one in the base
2791 # make sure the target branchmap also contains the one in the base
2790 targetrevs = list(set(baserevs) | set(targetrevs))
2792 targetrevs = list(set(baserevs) | set(targetrevs))
2791 targetrevs.sort()
2793 targetrevs.sort()
2792
2794
2793 cl = repo.changelog
2795 cl = repo.changelog
2794 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2796 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2795 allbaserevs.sort()
2797 allbaserevs.sort()
2796 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2798 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2797
2799
2798 newrevs = list(alltargetrevs.difference(allbaserevs))
2800 newrevs = list(alltargetrevs.difference(allbaserevs))
2799 newrevs.sort()
2801 newrevs.sort()
2800
2802
2801 allrevs = frozenset(unfi.changelog.revs())
2803 allrevs = frozenset(unfi.changelog.revs())
2802 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2804 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2803 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2805 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2804
2806
2805 def basefilter(repo, visibilityexceptions=None):
2807 def basefilter(repo, visibilityexceptions=None):
2806 return basefilterrevs
2808 return basefilterrevs
2807
2809
2808 def targetfilter(repo, visibilityexceptions=None):
2810 def targetfilter(repo, visibilityexceptions=None):
2809 return targetfilterrevs
2811 return targetfilterrevs
2810
2812
2811 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2813 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2812 ui.status(msg % (len(allbaserevs), len(newrevs)))
2814 ui.status(msg % (len(allbaserevs), len(newrevs)))
2813 if targetfilterrevs:
2815 if targetfilterrevs:
2814 msg = b'(%d revisions still filtered)\n'
2816 msg = b'(%d revisions still filtered)\n'
2815 ui.status(msg % len(targetfilterrevs))
2817 ui.status(msg % len(targetfilterrevs))
2816
2818
2817 try:
2819 try:
2818 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2820 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2819 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2821 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2820
2822
2821 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2823 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2822 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2824 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2823
2825
2824 # try to find an existing branchmap to reuse
2826 # try to find an existing branchmap to reuse
2825 subsettable = getbranchmapsubsettable()
2827 subsettable = getbranchmapsubsettable()
2826 candidatefilter = subsettable.get(None)
2828 candidatefilter = subsettable.get(None)
2827 while candidatefilter is not None:
2829 while candidatefilter is not None:
2828 candidatebm = repo.filtered(candidatefilter).branchmap()
2830 candidatebm = repo.filtered(candidatefilter).branchmap()
2829 if candidatebm.validfor(baserepo):
2831 if candidatebm.validfor(baserepo):
2830 filtered = repoview.filterrevs(repo, candidatefilter)
2832 filtered = repoview.filterrevs(repo, candidatefilter)
2831 missing = [r for r in allbaserevs if r in filtered]
2833 missing = [r for r in allbaserevs if r in filtered]
2832 base = candidatebm.copy()
2834 base = candidatebm.copy()
2833 base.update(baserepo, missing)
2835 base.update(baserepo, missing)
2834 break
2836 break
2835 candidatefilter = subsettable.get(candidatefilter)
2837 candidatefilter = subsettable.get(candidatefilter)
2836 else:
2838 else:
2837 # no suitable subset where found
2839 # no suitable subset where found
2838 base = branchmap.branchcache()
2840 base = branchmap.branchcache()
2839 base.update(baserepo, allbaserevs)
2841 base.update(baserepo, allbaserevs)
2840
2842
2841 def setup():
2843 def setup():
2842 x[0] = base.copy()
2844 x[0] = base.copy()
2843 if clearcaches:
2845 if clearcaches:
2844 unfi._revbranchcache = None
2846 unfi._revbranchcache = None
2845 clearchangelog(repo)
2847 clearchangelog(repo)
2846
2848
2847 def bench():
2849 def bench():
2848 x[0].update(targetrepo, newrevs)
2850 x[0].update(targetrepo, newrevs)
2849
2851
2850 timer(bench, setup=setup)
2852 timer(bench, setup=setup)
2851 fm.end()
2853 fm.end()
2852 finally:
2854 finally:
2853 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2855 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2854 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2856 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2855
2857
2856 @command(b'perfbranchmapload', [
2858 @command(b'perfbranchmapload', [
2857 (b'f', b'filter', b'', b'Specify repoview filter'),
2859 (b'f', b'filter', b'', b'Specify repoview filter'),
2858 (b'', b'list', False, b'List brachmap filter caches'),
2860 (b'', b'list', False, b'List brachmap filter caches'),
2859 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2861 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2860
2862
2861 ] + formatteropts)
2863 ] + formatteropts)
2862 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2864 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2863 """benchmark reading the branchmap"""
2865 """benchmark reading the branchmap"""
2864 opts = _byteskwargs(opts)
2866 opts = _byteskwargs(opts)
2865 clearrevlogs = opts[b'clear_revlogs']
2867 clearrevlogs = opts[b'clear_revlogs']
2866
2868
2867 if list:
2869 if list:
2868 for name, kind, st in repo.cachevfs.readdir(stat=True):
2870 for name, kind, st in repo.cachevfs.readdir(stat=True):
2869 if name.startswith(b'branch2'):
2871 if name.startswith(b'branch2'):
2870 filtername = name.partition(b'-')[2] or b'unfiltered'
2872 filtername = name.partition(b'-')[2] or b'unfiltered'
2871 ui.status(b'%s - %s\n'
2873 ui.status(b'%s - %s\n'
2872 % (filtername, util.bytecount(st.st_size)))
2874 % (filtername, util.bytecount(st.st_size)))
2873 return
2875 return
2874 if not filter:
2876 if not filter:
2875 filter = None
2877 filter = None
2876 subsettable = getbranchmapsubsettable()
2878 subsettable = getbranchmapsubsettable()
2877 if filter is None:
2879 if filter is None:
2878 repo = repo.unfiltered()
2880 repo = repo.unfiltered()
2879 else:
2881 else:
2880 repo = repoview.repoview(repo, filter)
2882 repo = repoview.repoview(repo, filter)
2881
2883
2882 repo.branchmap() # make sure we have a relevant, up to date branchmap
2884 repo.branchmap() # make sure we have a relevant, up to date branchmap
2883
2885
2884 try:
2886 try:
2885 fromfile = branchmap.branchcache.fromfile
2887 fromfile = branchmap.branchcache.fromfile
2886 except AttributeError:
2888 except AttributeError:
2887 # older versions
2889 # older versions
2888 fromfile = branchmap.read
2890 fromfile = branchmap.read
2889
2891
2890 currentfilter = filter
2892 currentfilter = filter
2891 # try once without timer, the filter may not be cached
2893 # try once without timer, the filter may not be cached
2892 while fromfile(repo) is None:
2894 while fromfile(repo) is None:
2893 currentfilter = subsettable.get(currentfilter)
2895 currentfilter = subsettable.get(currentfilter)
2894 if currentfilter is None:
2896 if currentfilter is None:
2895 raise error.Abort(b'No branchmap cached for %s repo'
2897 raise error.Abort(b'No branchmap cached for %s repo'
2896 % (filter or b'unfiltered'))
2898 % (filter or b'unfiltered'))
2897 repo = repo.filtered(currentfilter)
2899 repo = repo.filtered(currentfilter)
2898 timer, fm = gettimer(ui, opts)
2900 timer, fm = gettimer(ui, opts)
2899 def setup():
2901 def setup():
2900 if clearrevlogs:
2902 if clearrevlogs:
2901 clearchangelog(repo)
2903 clearchangelog(repo)
2902 def bench():
2904 def bench():
2903 fromfile(repo)
2905 fromfile(repo)
2904 timer(bench, setup=setup)
2906 timer(bench, setup=setup)
2905 fm.end()
2907 fm.end()
2906
2908
2907 @command(b'perfloadmarkers')
2909 @command(b'perfloadmarkers')
2908 def perfloadmarkers(ui, repo):
2910 def perfloadmarkers(ui, repo):
2909 """benchmark the time to parse the on-disk markers for a repo
2911 """benchmark the time to parse the on-disk markers for a repo
2910
2912
2911 Result is the number of markers in the repo."""
2913 Result is the number of markers in the repo."""
2912 timer, fm = gettimer(ui)
2914 timer, fm = gettimer(ui)
2913 svfs = getsvfs(repo)
2915 svfs = getsvfs(repo)
2914 timer(lambda: len(obsolete.obsstore(svfs)))
2916 timer(lambda: len(obsolete.obsstore(svfs)))
2915 fm.end()
2917 fm.end()
2916
2918
2917 @command(b'perflrucachedict', formatteropts +
2919 @command(b'perflrucachedict', formatteropts +
2918 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2920 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2919 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2921 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2920 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2922 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2921 (b'', b'size', 4, b'size of cache'),
2923 (b'', b'size', 4, b'size of cache'),
2922 (b'', b'gets', 10000, b'number of key lookups'),
2924 (b'', b'gets', 10000, b'number of key lookups'),
2923 (b'', b'sets', 10000, b'number of key sets'),
2925 (b'', b'sets', 10000, b'number of key sets'),
2924 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2926 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2925 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2927 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2926 norepo=True)
2928 norepo=True)
2927 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2929 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2928 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2930 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2929 opts = _byteskwargs(opts)
2931 opts = _byteskwargs(opts)
2930
2932
2931 def doinit():
2933 def doinit():
2932 for i in _xrange(10000):
2934 for i in _xrange(10000):
2933 util.lrucachedict(size)
2935 util.lrucachedict(size)
2934
2936
2935 costrange = list(range(mincost, maxcost + 1))
2937 costrange = list(range(mincost, maxcost + 1))
2936
2938
2937 values = []
2939 values = []
2938 for i in _xrange(size):
2940 for i in _xrange(size):
2939 values.append(random.randint(0, _maxint))
2941 values.append(random.randint(0, _maxint))
2940
2942
2941 # Get mode fills the cache and tests raw lookup performance with no
2943 # Get mode fills the cache and tests raw lookup performance with no
2942 # eviction.
2944 # eviction.
2943 getseq = []
2945 getseq = []
2944 for i in _xrange(gets):
2946 for i in _xrange(gets):
2945 getseq.append(random.choice(values))
2947 getseq.append(random.choice(values))
2946
2948
2947 def dogets():
2949 def dogets():
2948 d = util.lrucachedict(size)
2950 d = util.lrucachedict(size)
2949 for v in values:
2951 for v in values:
2950 d[v] = v
2952 d[v] = v
2951 for key in getseq:
2953 for key in getseq:
2952 value = d[key]
2954 value = d[key]
2953 value # silence pyflakes warning
2955 value # silence pyflakes warning
2954
2956
2955 def dogetscost():
2957 def dogetscost():
2956 d = util.lrucachedict(size, maxcost=costlimit)
2958 d = util.lrucachedict(size, maxcost=costlimit)
2957 for i, v in enumerate(values):
2959 for i, v in enumerate(values):
2958 d.insert(v, v, cost=costs[i])
2960 d.insert(v, v, cost=costs[i])
2959 for key in getseq:
2961 for key in getseq:
2960 try:
2962 try:
2961 value = d[key]
2963 value = d[key]
2962 value # silence pyflakes warning
2964 value # silence pyflakes warning
2963 except KeyError:
2965 except KeyError:
2964 pass
2966 pass
2965
2967
2966 # Set mode tests insertion speed with cache eviction.
2968 # Set mode tests insertion speed with cache eviction.
2967 setseq = []
2969 setseq = []
2968 costs = []
2970 costs = []
2969 for i in _xrange(sets):
2971 for i in _xrange(sets):
2970 setseq.append(random.randint(0, _maxint))
2972 setseq.append(random.randint(0, _maxint))
2971 costs.append(random.choice(costrange))
2973 costs.append(random.choice(costrange))
2972
2974
2973 def doinserts():
2975 def doinserts():
2974 d = util.lrucachedict(size)
2976 d = util.lrucachedict(size)
2975 for v in setseq:
2977 for v in setseq:
2976 d.insert(v, v)
2978 d.insert(v, v)
2977
2979
2978 def doinsertscost():
2980 def doinsertscost():
2979 d = util.lrucachedict(size, maxcost=costlimit)
2981 d = util.lrucachedict(size, maxcost=costlimit)
2980 for i, v in enumerate(setseq):
2982 for i, v in enumerate(setseq):
2981 d.insert(v, v, cost=costs[i])
2983 d.insert(v, v, cost=costs[i])
2982
2984
2983 def dosets():
2985 def dosets():
2984 d = util.lrucachedict(size)
2986 d = util.lrucachedict(size)
2985 for v in setseq:
2987 for v in setseq:
2986 d[v] = v
2988 d[v] = v
2987
2989
2988 # Mixed mode randomly performs gets and sets with eviction.
2990 # Mixed mode randomly performs gets and sets with eviction.
2989 mixedops = []
2991 mixedops = []
2990 for i in _xrange(mixed):
2992 for i in _xrange(mixed):
2991 r = random.randint(0, 100)
2993 r = random.randint(0, 100)
2992 if r < mixedgetfreq:
2994 if r < mixedgetfreq:
2993 op = 0
2995 op = 0
2994 else:
2996 else:
2995 op = 1
2997 op = 1
2996
2998
2997 mixedops.append((op,
2999 mixedops.append((op,
2998 random.randint(0, size * 2),
3000 random.randint(0, size * 2),
2999 random.choice(costrange)))
3001 random.choice(costrange)))
3000
3002
3001 def domixed():
3003 def domixed():
3002 d = util.lrucachedict(size)
3004 d = util.lrucachedict(size)
3003
3005
3004 for op, v, cost in mixedops:
3006 for op, v, cost in mixedops:
3005 if op == 0:
3007 if op == 0:
3006 try:
3008 try:
3007 d[v]
3009 d[v]
3008 except KeyError:
3010 except KeyError:
3009 pass
3011 pass
3010 else:
3012 else:
3011 d[v] = v
3013 d[v] = v
3012
3014
3013 def domixedcost():
3015 def domixedcost():
3014 d = util.lrucachedict(size, maxcost=costlimit)
3016 d = util.lrucachedict(size, maxcost=costlimit)
3015
3017
3016 for op, v, cost in mixedops:
3018 for op, v, cost in mixedops:
3017 if op == 0:
3019 if op == 0:
3018 try:
3020 try:
3019 d[v]
3021 d[v]
3020 except KeyError:
3022 except KeyError:
3021 pass
3023 pass
3022 else:
3024 else:
3023 d.insert(v, v, cost=cost)
3025 d.insert(v, v, cost=cost)
3024
3026
3025 benches = [
3027 benches = [
3026 (doinit, b'init'),
3028 (doinit, b'init'),
3027 ]
3029 ]
3028
3030
3029 if costlimit:
3031 if costlimit:
3030 benches.extend([
3032 benches.extend([
3031 (dogetscost, b'gets w/ cost limit'),
3033 (dogetscost, b'gets w/ cost limit'),
3032 (doinsertscost, b'inserts w/ cost limit'),
3034 (doinsertscost, b'inserts w/ cost limit'),
3033 (domixedcost, b'mixed w/ cost limit'),
3035 (domixedcost, b'mixed w/ cost limit'),
3034 ])
3036 ])
3035 else:
3037 else:
3036 benches.extend([
3038 benches.extend([
3037 (dogets, b'gets'),
3039 (dogets, b'gets'),
3038 (doinserts, b'inserts'),
3040 (doinserts, b'inserts'),
3039 (dosets, b'sets'),
3041 (dosets, b'sets'),
3040 (domixed, b'mixed')
3042 (domixed, b'mixed')
3041 ])
3043 ])
3042
3044
3043 for fn, title in benches:
3045 for fn, title in benches:
3044 timer, fm = gettimer(ui, opts)
3046 timer, fm = gettimer(ui, opts)
3045 timer(fn, title=title)
3047 timer(fn, title=title)
3046 fm.end()
3048 fm.end()
3047
3049
3048 @command(b'perfwrite', formatteropts)
3050 @command(b'perfwrite', formatteropts)
3049 def perfwrite(ui, repo, **opts):
3051 def perfwrite(ui, repo, **opts):
3050 """microbenchmark ui.write
3052 """microbenchmark ui.write
3051 """
3053 """
3052 opts = _byteskwargs(opts)
3054 opts = _byteskwargs(opts)
3053
3055
3054 timer, fm = gettimer(ui, opts)
3056 timer, fm = gettimer(ui, opts)
3055 def write():
3057 def write():
3056 for i in range(100000):
3058 for i in range(100000):
3057 ui.write((b'Testing write performance\n'))
3059 ui.write((b'Testing write performance\n'))
3058 timer(write)
3060 timer(write)
3059 fm.end()
3061 fm.end()
3060
3062
3061 def uisetup(ui):
3063 def uisetup(ui):
3062 if (util.safehasattr(cmdutil, b'openrevlog') and
3064 if (util.safehasattr(cmdutil, b'openrevlog') and
3063 not util.safehasattr(commands, b'debugrevlogopts')):
3065 not util.safehasattr(commands, b'debugrevlogopts')):
3064 # for "historical portability":
3066 # for "historical portability":
3065 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3067 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3066 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3068 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3067 # openrevlog() should cause failure, because it has been
3069 # openrevlog() should cause failure, because it has been
3068 # available since 3.5 (or 49c583ca48c4).
3070 # available since 3.5 (or 49c583ca48c4).
3069 def openrevlog(orig, repo, cmd, file_, opts):
3071 def openrevlog(orig, repo, cmd, file_, opts):
3070 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3072 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3071 raise error.Abort(b"This version doesn't support --dir option",
3073 raise error.Abort(b"This version doesn't support --dir option",
3072 hint=b"use 3.5 or later")
3074 hint=b"use 3.5 or later")
3073 return orig(repo, cmd, file_, opts)
3075 return orig(repo, cmd, file_, opts)
3074 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3076 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3075
3077
3076 @command(b'perfprogress', formatteropts + [
3078 @command(b'perfprogress', formatteropts + [
3077 (b'', b'topic', b'topic', b'topic for progress messages'),
3079 (b'', b'topic', b'topic', b'topic for progress messages'),
3078 (b'c', b'total', 1000000, b'total value we are progressing to'),
3080 (b'c', b'total', 1000000, b'total value we are progressing to'),
3079 ], norepo=True)
3081 ], norepo=True)
3080 def perfprogress(ui, topic=None, total=None, **opts):
3082 def perfprogress(ui, topic=None, total=None, **opts):
3081 """printing of progress bars"""
3083 """printing of progress bars"""
3082 opts = _byteskwargs(opts)
3084 opts = _byteskwargs(opts)
3083
3085
3084 timer, fm = gettimer(ui, opts)
3086 timer, fm = gettimer(ui, opts)
3085
3087
3086 def doprogress():
3088 def doprogress():
3087 with ui.makeprogress(topic, total=total) as progress:
3089 with ui.makeprogress(topic, total=total) as progress:
3088 for i in pycompat.xrange(total):
3090 for i in _xrange(total):
3089 progress.increment()
3091 progress.increment()
3090
3092
3091 timer(doprogress)
3093 timer(doprogress)
3092 fm.end()
3094 fm.end()
General Comments 0
You need to be logged in to leave comments. Login now