##// END OF EJS Templates
perf: add a --stats argument to perfhelper-mergecopies...
marmoute -
r43211:3a1ad3ae default
parent child Browse files
Show More
@@ -1,3094 +1,3228 b''
# perf.py - performance test routines
'''helper extension to measure performance

Configurations
==============

``perf``
--------

``all-timing``
  When set, additional statistics will be reported for each benchmark: best,
  worst, median average. If not set only the best timing is reported
  (default: off).

``presleep``
  number of second to wait before any group of runs (default: 1)

``pre-run``
  number of run to perform before starting measurement.

``profile-benchmark``
  Enable profiling for the benchmarked section.
  (The first iteration is benchmarked)

``run-limits``
  Control the number of runs each benchmark will perform. The option value
  should be a list of `<time>-<numberofrun>` pairs. After each run the
  conditions are considered in order with the following logic:

  If benchmark has been running for <time> seconds, and we have performed
  <numberofrun> iterations, stop the benchmark,

  The default value is: `3.0-100, 10.0-3`

``stub``
  When set, benchmarks will only be run once, useful for testing
  (default: off)
'''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 from __future__ import absolute_import
57 from __future__ import absolute_import
58 import contextlib
58 import contextlib
59 import functools
59 import functools
60 import gc
60 import gc
61 import os
61 import os
62 import random
62 import random
63 import shutil
63 import shutil
64 import struct
64 import struct
65 import sys
65 import sys
66 import tempfile
66 import tempfile
67 import threading
67 import threading
68 import time
68 import time
69 from mercurial import (
69 from mercurial import (
70 changegroup,
70 changegroup,
71 cmdutil,
71 cmdutil,
72 commands,
72 commands,
73 copies,
73 copies,
74 error,
74 error,
75 extensions,
75 extensions,
76 hg,
76 hg,
77 mdiff,
77 mdiff,
78 merge,
78 merge,
79 revlog,
79 revlog,
80 util,
80 util,
81 )
81 )
82
82
# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap  # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete  # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import registrar  # since 3.7 (or 37d50250b696)
    dir(registrar)  # forcibly load it
except ImportError:
    registrar = None
try:
    from mercurial import repoview  # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
    from mercurial.utils import repoviewutil  # since 5.0
except ImportError:
    repoviewutil = None
try:
    from mercurial import scmutil  # since 1.9 (or 8b252e826c68)
except ImportError:
    pass
try:
    from mercurial import setdiscovery  # since 1.9 (or cb98fed52495)
except ImportError:
    pass

try:
    from mercurial import profiling
except ImportError:
    # profiling is optional; gettimer() checks for None before using it
    profiling = None
120
120
def identity(a):
    """Return *a* unchanged.

    Used as a no-op fallback for pycompat helpers (e.g. ``_byteskwargs``,
    ``fsencode``) when running against Mercurial versions that predate them.
    """
    return a
123
123
try:
    from mercurial import pycompat
    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _bytestr = pycompat.bytestr  # since 4.2 (or b70407bd84d5)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (NameError, ImportError, AttributeError):
    # running against a Mercurial too old for pycompat: fall back to
    # py2-only stand-ins
    import inspect
    getargspec = inspect.getargspec
    _byteskwargs = identity
    _bytestr = str
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange
145
145
try:
    # 4.7+
    queue = pycompat.queue.Queue
except (NameError, AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (NameError, AttributeError, ImportError):
        # no pycompat at all: py2 stdlib name
        import Queue as queue
155
155
try:
    from mercurial import logcmdutil
    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        # neither location exists on this Mercurial; callers must check
        makelogtemplater = None
164
164
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # sentinel distinguishable from any real attribute
def safehasattr(thing, attr):
    """Return True if *thing* has attribute *attr* (attr given as bytes)."""
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)
172
172
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    # NOTE(review): os.name is a native str, so comparing against b'nt'
    # only matches on py2 (where bytes == str); py3 always has
    # time.perf_counter and never reaches this branch — confirm intended.
    util.timer = time.clock
else:
    util.timer = time.time
182
182
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))
190
190
# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
    (b'c', b'changelog', False, (b'open changelog')),
    (b'm', b'manifest', False, (b'open manifest')),
    (b'', b'dir', False, (b'open directory manifest')),
]))
202
202
cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a ``b'name|alias1|alias2'`` command spec into a list of names."""
    return cmd.split(b"|")
210
210
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
238
238
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(b'perf', b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(b'perf', b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(b'perf', b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(b'perf', b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    # (older configitem() rejects the 'experimental' keyword, so
    # re-register everything without it)
    configitem(b'perf', b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'run-limits',
        default=mercurial.configitems.dynamicdefault,
    )
296
296
def getlen(ui):
    """Return a length function honoring the perf.stub config.

    When ``perf.stub`` is set, every collection reports length 1 so
    benchmarks do minimal work; otherwise the builtin ``len`` is returned.
    """
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len
301
301
class noop(object):
    """dummy context manager"""
    def __enter__(self):
        pass
    def __exit__(self, *args):
        pass

# shared do-nothing context, used when profiling is disabled
NOOPCTX = noop()
310
310
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n'
                     % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (_bytestr(e), item)))
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (_bytestr(e), item)))
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
                          prerun=prerun, profiler=profiler)
    return t, fm
410
410
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* exactly once (after optional *setup*), measuring nothing.

    Substituted for ``_timer`` when the ``perf.stub`` config is set, so
    test runs stay fast. *fm* and *title* are accepted for signature
    compatibility with ``_timer`` and are unused.
    """
    if setup is not None:
        setup()
    func()
415
415
@contextlib.contextmanager
def timeone():
    """Context manager timing its body.

    Yields a list; on exit a single ``(wall, user, sys)`` triple is
    appended to it (user/sys derived from ``os.times()`` deltas).
    """
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
426
426
427
427
# list of stop condition (elapsed time, minimal run count)
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)

def _timer(fm, func, setup=None, title=None, displayall=False,
           limits=DEFAULTLIMITS, prerun=0, profiler=None):
    """Benchmark *func*, then emit the timings through formatter *fm*.

    *setup* (if given) runs before every iteration and is not timed.
    *prerun* extra warm-up iterations run before measurement starts.
    *profiler* wraps only the first measured iteration, then is replaced
    by the no-op context. Iteration stops at the first (elapsed, count)
    pair in *limits* that is satisfied.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for i in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        profiler = NOOPCTX  # only the first iteration is profiled
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)
466
466
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timing rows through formatter *fm*.

    *timings* is a list of ``(wall, user, sys)`` triples; it is sorted in
    place. The best row is always shown; with *displayall* the max, avg
    and median rows are added.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)
    def display(role, entry):
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')
    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
498
498
499 # utilities for historical portability
499 # utilities for historical portability
500
500
def getint(ui, section, name, default):
    """Read config ``section.name`` as an int, returning *default* if unset.

    Raises error.ConfigError when the value is set but not an integer.
    """
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, v))
512
512
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        # set/restore pair over the captured original value
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
542
542
543 # utilities to examine each internal API changes
543 # utilities to examine each internal API changes
544
544
def getbranchmapsubsettable():
    """Locate the branch-head `subsettable` mapping across hg versions.

    For "historical portability", subsettable is searched in, in order:
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    - repoviewutil since 5.0
    """
    for module in (branchmap, repoview, repoviewutil):
        table = getattr(module, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
561
561
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store

    For "historical portability": repo.svfs has been available since
    2.3 (or 7034365089bf); older repositories expose `sopener` instead.
    """
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    # no-default getattr: an AttributeError here means an unsupported repo
    return getattr(repo, 'sopener')
572
572
def getvfs(repo):
    """Return appropriate object to access files under .hg

    For "historical portability": repo.vfs has been available since
    2.3 (or 7034365089bf); older repositories expose `opener` instead.
    """
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    # no-default getattr: an AttributeError here means an unsupported repo
    return getattr(repo, 'opener')
583
583
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API

    Probes successive generations of the tags-cache API, newest first,
    and returns a zero-argument callable that invalidates the cache on
    `repo`. Aborts if no known API generation is found.
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
612
612
613 # utilities to clear cache
613 # utilities to clear cache
614
614
def clearfilecache(obj, attrname):
    """Drop `attrname` from `obj`'s filecache so it is recomputed on access.

    Works on the unfiltered view when `obj` provides one, since that is
    where cached properties live.
    """
    getunfiltered = getattr(obj, 'unfiltered', None)
    if getunfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
622
622
def clearchangelog(repo):
    """Invalidate cached changelog data so the next access reloads it."""
    if repo.unfiltered() is not repo:
        # a filtered view caches the changelog in its own slots; wipe them
        # directly, bypassing any property machinery
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
628
628
629 # perf commands
629 # perf commands
630
630
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    # Benchmark a full dirstate walk for the given file patterns.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})
    def walk():
        entries = repo.dirstate.walk(matcher, subrepos=[], unknown=True,
                                     ignored=False)
        return len(list(entries))
    timer(walk)
    fm.end()
639
639
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    # Benchmark annotating file `f` at the working directory parent.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]
    def annotate():
        return len(fctx.annotate(True))
    timer(annotate)
    fm.end()
647
647
@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    # Benchmark a full status run; --unknown also scans for unknown files.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    lookunknown = opts[b'unknown']
    def status():
        return sum(len(entries) for entries in
                   repo.status(unknown=lookunknown))
    timer(status)
    fm.end()
659
659
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    # Benchmark a dry-run addremove over the whole working copy.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Read the attribute BEFORE entering the try block: if this access
    # were inside the try and raised, the finally clause would fail with
    # a NameError on `oldquiet` instead of the real error.
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        # the uipathfn parameter was added to scmutil.addremove later;
        # pick the signature this Mercurial actually has
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn,
                                            opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
677
677
def clearcaches(cl):
    # Clear the lookup caches of a revlog/changelog `cl`.
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        # modern revlogs expose an explicit cache-clearing entry point
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # older revlogs: reset the node lookup cache by hand
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
686
686
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def reset():
        # start every run from cold caches
        clearcaches(cl)
    def run():
        len(cl.headrevs())
    timer(run, setup=reset)
    fm.end()
699
699
@command(b'perftags', formatteropts+
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perftags(ui, repo, **opts):
    # Benchmark computing the repository's tags.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cleartags = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def reset():
        if clearrevlogs:
            # also drop the revlog caches so their reload is measured too
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        cleartags()
    def count():
        return len(repo.tags())
    timer(count, setup=reset)
    fm.end()
718
718
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    # Benchmark iterating over every ancestor of the current heads.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def walkancestors():
        for _rev in repo.changelog.ancestors(heads):
            pass
    timer(walkancestors)
    fm.end()
729
729
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    # Benchmark membership tests of REVSET revisions in the lazy ancestor set.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def testmembership():
        ancestors = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in ancestors
    timer(testmembership)
    fm.end()
742
742
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    repos = [repo, None]
    # Unlike the other perf commands, `opts` was not run through
    # _byteskwargs here, so on Python 3 gettimer saw str keys and silently
    # ignored every formatter/timing option. Convert only the copy handed
    # to gettimer; hg.peer keeps the original str-keyed opts it expects.
    timer, fm = gettimer(ui, _byteskwargs(opts))
    path = ui.expandpath(path)

    def s():
        # (re)open the peer outside the timed section
        repos[1] = hg.peer(ui, opts, path)
    def d():
        setdiscovery.findcommonheads(ui, *repos)
    timer(d, setup=s)
    fm.end()
757
757
@command(b'perfbookmarks', formatteropts +
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def reset():
        if clearrevlogs:
            clearchangelog(repo)
        # drop the cached bookmark store so each run reparses from disk
        clearfilecache(repo, b'_bookmarks')
    def load():
        repo._bookmarks
    timer(load, setup=reset)
    fm.end()
776
776
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    # imported here (not at module level) so perf.py still loads on
    # Mercurial versions lacking these modules
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # factory: a benchmark that reopens/reparses the bundle, then runs `fn`
    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    # factory: read the decoded bundle stream in `size`-byte chunks
    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    # factory: raw file reads, bypassing bundle parsing (I/O baseline)
    def makestdioread(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    # factory: read each bundle2 part's payload in `size`-byte chunks
    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    # raw-read baselines run for every bundle format
    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # sniff the bundle format once to pick the matching benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    # each benchmark gets its own timer/formatter so results are reported
    # per title
    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
894
894
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def produce():
        state, chunks = bundler._generatechangelog(cl, nodes)
        # exhaust the generator; the chunks themselves are discarded
        for _chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(produce)

    fm.end()
925
925
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    # Benchmark dirstate.hasdir() with a cold directory-name cache.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # touch the dirstate once so its initial parse is not part of the timing
    b'a' in dirstate
    def d():
        dirstate.hasdir(b'a')
        # drop the cached directory map so the next run rebuilds it
        del dirstate._map._dirs
    timer(d)
    fm.end()
937
937
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    # Benchmark a full dirstate load (invalidate, then force a reparse).
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # warm up once outside the timed section
    b"a" in repo.dirstate
    def d():
        repo.dirstate.invalidate()
        # membership test forces the dirstate file to be reparsed
        b"a" in repo.dirstate
    timer(d)
    fm.end()
948
948
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    # Benchmark dirstate.hasdir(), clearing the directory cache each run.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # load the dirstate before timing
    b"a" in repo.dirstate
    def d():
        repo.dirstate.hasdir(b"a")
        # drop the cached directory map so the next run rebuilds it
        del repo.dirstate._map._dirs
    timer(d)
    fm.end()
959
959
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    # Benchmark building the case-folding map for dirstate files.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate outside the timed section
    b'a' in dirstate
    def d():
        dirstate._map.filefoldmap.get(b'a')
        # drop the lazily computed map so each run recomputes it
        del dirstate._map.filefoldmap
    timer(d)
    fm.end()
971
971
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    # Benchmark building the case-folding map for dirstate directories.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate outside the timed section
    b'a' in dirstate
    def d():
        dirstate._map.dirfoldmap.get(b'a')
        # drop both the fold map and the dir map it is derived from,
        # so each run recomputes everything
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(d)
    fm.end()
984
984
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    # Benchmark serializing the dirstate back to disk.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # load the dirstate before timing
    b"a" in ds
    def d():
        # mark dirty so write() actually serializes instead of no-op'ing
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()
996
996
def _getmergerevs(repo, opts):
    """parse command argument to return rev involved in merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        # explicit merge base requested; overrides the computed ancestor
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1018
1018
@command(b'perfmergecalculate',
         [
          (b'r', b'rev', b'.', b'rev to merge against'),
          (b'', b'from', b'', b'rev to merge from'),
          (b'', b'base', b'', b'the revision to use as base'),
         ] + formatteropts)
def perfmergecalculate(ui, repo, **opts):
    # Benchmark merge.calculateupdates for the requested merge revisions.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)
    def calculate():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(calculate)
    fm.end()
1037
1037
@command(b'perfmergecopies',
         [
          (b'r', b'rev', b'.', b'rev to merge against'),
          (b'', b'from', b'', b'rev to merge from'),
          (b'', b'base', b'', b'the revision to use as base'),
         ] + formatteropts)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)
    def trace():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)
    timer(trace)
    fm.end()
1055
1055
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    srcctx = scmutil.revsingle(repo, rev1, rev1)
    dstctx = scmutil.revsingle(repo, rev2, rev2)
    # time a full pathcopies computation between the two contexts
    timer(lambda: copies.pathcopies(srcctx, dstctx))
    fm.end()
1067
1067
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
         ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def runone():
        phases = _phases
        if full:
            # drop the cached object so re-reading the file is timed too
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(runone)
    fm.end()
1086
1086
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    # the peer is no longer needed once the phase data has been fetched
    del other
    if remotephases.get(b'publishing', False):
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing': # ignore data related to publish option
            continue
        if bin(nhex) in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)

    def runone():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)

    timer(runone)
    fm.end()
1142
1142
@command(b'perfmanifest',[
    (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
    (b'', b'clear-disk', False, b'clear on-disk caches too'),
] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if manifest_rev:
        from mercurial.node import bin

        if len(rev) == 40:
            # a full hex manifest node was given directly
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    else:
        # resolve through the changeset: use its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()

    def runone():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(runone)
    fm.end()
1178
1178
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    # benchmark reading a single changeset from the changelog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def runone():
        repo.changelog.read(node)
        #repo.changelog._cache = None

    timer(runone)
    fm.end()
1189
1189
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def resetignore():
        # force the ignore matcher to be rebuilt from scratch on each run
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def loadignore():
        dirstate._ignore

    timer(loadignore, setup=resetignore, title=b"load")
    fm.end()
1206
1206
@command(b'perfindex', [
    (b'', b'rev', [], b'revision to be looked up (default tip)'),
    (b'', b'no-lookup', None, b'do not revision lookup post creation'),
] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # fix: after _byteskwargs() the option keys are bytes, so the str key
        # 'rev' (and a str Abort message) would break on Python 3
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)
    timer(d, setup=setup)
    fm.end()
1260
1260
@command(b'perfnodemap', [
    (b'', b'rev', [], b'revision to be looked up (default tip)'),
    (b'', b'clear-caches', True, b'clear revlog cache between calls'),
] + formatteropts)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # fix: after _byteskwargs() the option keys are bytes, so the str keys
    # 'clear_caches' (and a str Abort message) would break on Python 3
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]
    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        def setup():
            setnodeget()
    else:
        setnodeget()
        d() # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1319
1319
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    # measure the cost of spawning a bare `hg version` invocation
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def runone():
        if os.name == r'nt':
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
        else:
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))

    timer(runone)
    fm.end()
1333
1333
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodelist = [repo.changelog.node(i) for i in _xrange(count)]

    def runone():
        for node in nodelist:
            repo.changelog.parents(node)

    timer(runone)
    fm.end()
1357
1357
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    # benchmark computing the file list of one changeset
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo[rev].files()))
    fm.end()
1367
1367
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    # benchmark reading the raw file list straight from the changelog entry
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    timer(lambda: len(cl.read(rev)[3]))
    fm.end()
1378
1378
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    # benchmark resolving one revision identifier to a node
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def runone():
        return len(repo.lookup(rev))

    timer(runone)
    fm.end()
1385
1385
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
         ], norepo=True)
def perflinelogedits(ui, **opts):
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    random.seed(0)
    randint = random.randint
    currentlines = 0
    # pre-generate every edit so only replacelines() is in the timed section;
    # the randint call order must stay fixed to keep the workload reproducible
    hunks = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        hunks.append((rev, a1, a2, b1, b2))

    def runone():
        ll = linelog.linelog()
        for args in hunks:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(runone)
    fm.end()
1419
1419
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    # benchmark parsing and resolving a set of revision specifications
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def runone():
        return len(scmutil.revrange(repo, specs))

    timer(runone)
    fm.end()
1427
1427
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    # benchmark a node -> rev lookup against a freshly cleared revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    node = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")

    def runone():
        cl.rev(node)
        clearcaches(cl)

    timer(runone)
    fm.end()
1441
1441
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    # benchmark a full `hg log` run, output discarded into a buffer
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)

    def runone():
        commands.log(ui, repo, rev=rev, date=b'', user=b'',
                     copies=opts.get(b'rename'))

    ui.pushbuffer()
    timer(runone)
    ui.popbuffer()
    fm.end()
1455
1455
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[rev]
            ctx.branch() # read changelog data (in addition to the index)

    timer(moonwalk)
    fm.end()
1470
1470
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render through a ui that throws everything away, so only the
    # templating work itself is measured
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1504
1504
1505 def _displaystats(ui, opts, entries, data):
1506 pass
1507 # use a second formatter because the data are quite different, not sure
1508 # how it flies with the templater.
1509 fm = ui.formatter(b'perf-stats', opts)
1510 for key, title in entries:
1511 values = data[key]
1512 nbvalues = len(data)
1513 values.sort()
1514 stats = {
1515 'key': key,
1516 'title': title,
1517 'nbitems': len(values),
1518 'min': values[0][0],
1519 '10%': values[(nbvalues * 10) // 100][0],
1520 '25%': values[(nbvalues * 25) // 100][0],
1521 '50%': values[(nbvalues * 50) // 100][0],
1522 '75%': values[(nbvalues * 75) // 100][0],
1523 '80%': values[(nbvalues * 80) // 100][0],
1524 '85%': values[(nbvalues * 85) // 100][0],
1525 '90%': values[(nbvalues * 90) // 100][0],
1526 '95%': values[(nbvalues * 95) // 100][0],
1527 '99%': values[(nbvalues * 99) // 100][0],
1528 'max': values[-1][0],
1529 }
1530 fm.startitem()
1531 fm.data(**stats)
1532 # make node pretty for the human output
1533 fm.plain('### %s (%d items)\n' % (title, len(values)))
1534 lines = [
1535 'min',
1536 '10%',
1537 '25%',
1538 '50%',
1539 '75%',
1540 '80%',
1541 '85%',
1542 '90%',
1543 '95%',
1544 '99%',
1545 'max',
1546 ]
1547 for l in lines:
1548 fm.plain('%s: %s\n' % (l, stats[l]))
1549 fm.end()
1550
1505 @command(b'perfhelper-mergecopies', formatteropts +
1551 @command(b'perfhelper-mergecopies', formatteropts +
1506 [
1552 [
1507 (b'r', b'revs', [], b'restrict search to these revisions'),
1553 (b'r', b'revs', [], b'restrict search to these revisions'),
1508 (b'', b'timing', False, b'provides extra data (costly)'),
1554 (b'', b'timing', False, b'provides extra data (costly)'),
1555 (b'', b'stats', False, b'provides statistic about the measured data'),
1509 ])
1556 ])
1510 def perfhelpermergecopies(ui, repo, revs=[], **opts):
1557 def perfhelpermergecopies(ui, repo, revs=[], **opts):
1511 """find statistics about potential parameters for `perfmergecopies`
1558 """find statistics about potential parameters for `perfmergecopies`
1512
1559
1513 This command find (base, p1, p2) triplet relevant for copytracing
1560 This command find (base, p1, p2) triplet relevant for copytracing
1514 benchmarking in the context of a merge. It reports values for some of the
1561 benchmarking in the context of a merge. It reports values for some of the
1515 parameters that impact merge copy tracing time during merge.
1562 parameters that impact merge copy tracing time during merge.
1516
1563
1517 If `--timing` is set, rename detection is run and the associated timing
1564 If `--timing` is set, rename detection is run and the associated timing
1518 will be reported. The extra details come at the cost of slower command
1565 will be reported. The extra details come at the cost of slower command
1519 execution.
1566 execution.
1520
1567
1521 Since rename detection is only run once, other factors might easily
1568 Since rename detection is only run once, other factors might easily
1522 affect the precision of the timing. However it should give a good
1569 affect the precision of the timing. However it should give a good
1523 approximation of which revision triplets are very costly.
1570 approximation of which revision triplets are very costly.
1524 """
1571 """
1525 opts = _byteskwargs(opts)
1572 opts = _byteskwargs(opts)
1526 fm = ui.formatter(b'perf', opts)
1573 fm = ui.formatter(b'perf', opts)
1527 dotiming = opts[b'timing']
1574 dotiming = opts[b'timing']
1575 dostats = opts[b'stats']
1528
1576
1529 output_template = [
1577 output_template = [
1530 ("base", "%(base)12s"),
1578 ("base", "%(base)12s"),
1531 ("p1", "%(p1.node)12s"),
1579 ("p1", "%(p1.node)12s"),
1532 ("p2", "%(p2.node)12s"),
1580 ("p2", "%(p2.node)12s"),
1533 ("p1.nb-revs", "%(p1.nbrevs)12d"),
1581 ("p1.nb-revs", "%(p1.nbrevs)12d"),
1534 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
1582 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
1535 ("p1.renames", "%(p1.renamedfiles)12d"),
1583 ("p1.renames", "%(p1.renamedfiles)12d"),
1536 ("p1.time", "%(p1.time)12.3f"),
1584 ("p1.time", "%(p1.time)12.3f"),
1537 ("p2.nb-revs", "%(p2.nbrevs)12d"),
1585 ("p2.nb-revs", "%(p2.nbrevs)12d"),
1538 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
1586 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
1539 ("p2.renames", "%(p2.renamedfiles)12d"),
1587 ("p2.renames", "%(p2.renamedfiles)12d"),
1540 ("p2.time", "%(p2.time)12.3f"),
1588 ("p2.time", "%(p2.time)12.3f"),
1541 ("renames", "%(nbrenamedfiles)12d"),
1589 ("renames", "%(nbrenamedfiles)12d"),
1542 ("total.time", "%(time)12.3f"),
1590 ("total.time", "%(time)12.3f"),
1543 ]
1591 ]
1544 if not dotiming:
1592 if not dotiming:
1545 output_template = [i for i in output_template
1593 output_template = [i for i in output_template
1546 if not ('time' in i[0] or 'renames' in i[0])]
1594 if not ('time' in i[0] or 'renames' in i[0])]
1547 header_names = [h for (h, v) in output_template]
1595 header_names = [h for (h, v) in output_template]
1548 output = ' '.join([v for (h, v) in output_template]) + '\n'
1596 output = ' '.join([v for (h, v) in output_template]) + '\n'
1549 header = ' '.join(['%12s'] * len(header_names)) + '\n'
1597 header = ' '.join(['%12s'] * len(header_names)) + '\n'
1550 fm.plain(header % tuple(header_names))
1598 fm.plain(header % tuple(header_names))
1551
1599
1552 if not revs:
1600 if not revs:
1553 revs = ['all()']
1601 revs = ['all()']
1554 revs = scmutil.revrange(repo, revs)
1602 revs = scmutil.revrange(repo, revs)
1555
1603
1604 if dostats:
1605 alldata = {
1606 'nbrevs': [],
1607 'nbmissingfiles': [],
1608 }
1609 if dotiming:
1610 alldata['parentnbrenames'] = []
1611 alldata['totalnbrenames'] = []
1612 alldata['parenttime'] = []
1613 alldata['totaltime'] = []
1614
1556 roi = repo.revs('merge() and %ld', revs)
1615 roi = repo.revs('merge() and %ld', revs)
1557 for r in roi:
1616 for r in roi:
1558 ctx = repo[r]
1617 ctx = repo[r]
1559 p1 = ctx.p1()
1618 p1 = ctx.p1()
1560 p2 = ctx.p2()
1619 p2 = ctx.p2()
1561 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
1620 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
1562 for b in bases:
1621 for b in bases:
1563 b = repo[b]
1622 b = repo[b]
1564 p1missing = copies._computeforwardmissing(b, p1)
1623 p1missing = copies._computeforwardmissing(b, p1)
1565 p2missing = copies._computeforwardmissing(b, p2)
1624 p2missing = copies._computeforwardmissing(b, p2)
1566 data = {
1625 data = {
1567 b'base': b.hex(),
1626 b'base': b.hex(),
1568 b'p1.node': p1.hex(),
1627 b'p1.node': p1.hex(),
1569 b'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())),
1628 b'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())),
1570 b'p1.nbmissingfiles': len(p1missing),
1629 b'p1.nbmissingfiles': len(p1missing),
1571 b'p2.node': p2.hex(),
1630 b'p2.node': p2.hex(),
1572 b'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())),
1631 b'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())),
1573 b'p2.nbmissingfiles': len(p2missing),
1632 b'p2.nbmissingfiles': len(p2missing),
1574 }
1633 }
1634 if dostats:
1635 if p1missing:
1636 alldata['nbrevs'].append((
1637 data['p1.nbrevs'],
1638 b.hex(),
1639 p1.hex()
1640 ))
1641 alldata['nbmissingfiles'].append((
1642 data['p1.nbmissingfiles'],
1643 b.hex(),
1644 p1.hex()
1645 ))
1646 if p2missing:
1647 alldata['nbrevs'].append((
1648 data['p2.nbrevs'],
1649 b.hex(),
1650 p2.hex()
1651 ))
1652 alldata['nbmissingfiles'].append((
1653 data['p2.nbmissingfiles'],
1654 b.hex(),
1655 p2.hex()
1656 ))
1575 if dotiming:
1657 if dotiming:
1576 begin = util.timer()
1658 begin = util.timer()
1577 mergedata = copies.mergecopies(repo, p1, p2, b)
1659 mergedata = copies.mergecopies(repo, p1, p2, b)
1578 end = util.timer()
1660 end = util.timer()
1579 # not very stable timing since we did only one run
1661 # not very stable timing since we did only one run
1580 data['time'] = end - begin
1662 data['time'] = end - begin
1581 # mergedata contains five dicts: "copy", "movewithdir",
1663 # mergedata contains five dicts: "copy", "movewithdir",
1582 # "diverge", "renamedelete" and "dirmove".
1664 # "diverge", "renamedelete" and "dirmove".
1583 # The first 4 are about renamed file so lets count that.
1665 # The first 4 are about renamed file so lets count that.
1584 renames = len(mergedata[0])
1666 renames = len(mergedata[0])
1585 renames += len(mergedata[1])
1667 renames += len(mergedata[1])
1586 renames += len(mergedata[2])
1668 renames += len(mergedata[2])
1587 renames += len(mergedata[3])
1669 renames += len(mergedata[3])
1588 data['nbrenamedfiles'] = renames
1670 data['nbrenamedfiles'] = renames
1589 begin = util.timer()
1671 begin = util.timer()
1590 p1renames = copies.pathcopies(b, p1)
1672 p1renames = copies.pathcopies(b, p1)
1591 end = util.timer()
1673 end = util.timer()
1592 data['p1.time'] = end - begin
1674 data['p1.time'] = end - begin
1593 begin = util.timer()
1675 begin = util.timer()
1594 p2renames = copies.pathcopies(b, p2)
1676 p2renames = copies.pathcopies(b, p2)
1595 data['p2.time'] = end - begin
1677 data['p2.time'] = end - begin
1596 end = util.timer()
1678 end = util.timer()
1597 data['p1.renamedfiles'] = len(p1renames)
1679 data['p1.renamedfiles'] = len(p1renames)
1598 data['p2.renamedfiles'] = len(p2renames)
1680 data['p2.renamedfiles'] = len(p2renames)
1681
1682 if dostats:
1683 if p1missing:
1684 alldata['parentnbrenames'].append((
1685 data['p1.renamedfiles'],
1686 b.hex(),
1687 p1.hex()
1688 ))
1689 alldata['parenttime'].append((
1690 data['p1.time'],
1691 b.hex(),
1692 p1.hex()
1693 ))
1694 if p2missing:
1695 alldata['parentnbrenames'].append((
1696 data['p2.renamedfiles'],
1697 b.hex(),
1698 p2.hex()
1699 ))
1700 alldata['parenttime'].append((
1701 data['p2.time'],
1702 b.hex(),
1703 p2.hex()
1704 ))
1705 if p1missing or p2missing:
1706 alldata['totalnbrenames'].append((
1707 data['nbrenamedfiles'],
1708 b.hex(),
1709 p1.hex(),
1710 p2.hex()
1711 ))
1712 alldata['totaltime'].append((
1713 data['time'],
1714 b.hex(),
1715 p1.hex(),
1716 p2.hex()
1717 ))
1599 fm.startitem()
1718 fm.startitem()
1600 fm.data(**data)
1719 fm.data(**data)
1601 # make node pretty for the human output
1720 # make node pretty for the human output
1602 out = data.copy()
1721 out = data.copy()
1603 out['base'] = fm.hexfunc(b.node())
1722 out['base'] = fm.hexfunc(b.node())
1604 out['p1.node'] = fm.hexfunc(p1.node())
1723 out['p1.node'] = fm.hexfunc(p1.node())
1605 out['p2.node'] = fm.hexfunc(p2.node())
1724 out['p2.node'] = fm.hexfunc(p2.node())
1606 fm.plain(output % out)
1725 fm.plain(output % out)
1607
1726
1608 fm.end()
1727 fm.end()
1728 if dostats:
1729 # use a second formatter because the data are quite different, not sure
1730 # how it flies with the templater.
1731 entries = [
1732 ('nbrevs', 'number of revision covered'),
1733 ('nbmissingfiles', 'number of missing files at head'),
1734 ]
1735 if dotiming:
1736 entries.append(('parentnbrenames',
1737 'rename from one parent to base'))
1738 entries.append(('totalnbrenames', 'total number of renames'))
1739 entries.append(('parenttime', 'time for one parent'))
1740 entries.append(('totaltime', 'time for both parents'))
1741 _displaystats(ui, opts, entries, alldata)
1742
1609
1743
1610 @command(b'perfhelper-pathcopies', formatteropts +
1744 @command(b'perfhelper-pathcopies', formatteropts +
1611 [
1745 [
1612 (b'r', b'revs', [], b'restrict search to these revisions'),
1746 (b'r', b'revs', [], b'restrict search to these revisions'),
1613 (b'', b'timing', False, b'provides extra data (costly)'),
1747 (b'', b'timing', False, b'provides extra data (costly)'),
1614 ])
1748 ])
1615 def perfhelperpathcopies(ui, repo, revs=[], **opts):
1749 def perfhelperpathcopies(ui, repo, revs=[], **opts):
1616 """find statistic about potential parameters for the `perftracecopies`
1750 """find statistic about potential parameters for the `perftracecopies`
1617
1751
1618 This command find source-destination pair relevant for copytracing testing.
1752 This command find source-destination pair relevant for copytracing testing.
1619 It report value for some of the parameters that impact copy tracing time.
1753 It report value for some of the parameters that impact copy tracing time.
1620
1754
1621 If `--timing` is set, rename detection is run and the associated timing
1755 If `--timing` is set, rename detection is run and the associated timing
1622 will be reported. The extra details comes at the cost of a slower command
1756 will be reported. The extra details comes at the cost of a slower command
1623 execution.
1757 execution.
1624
1758
1625 Since the rename detection is only run once, other factors might easily
1759 Since the rename detection is only run once, other factors might easily
1626 affect the precision of the timing. However it should give a good
1760 affect the precision of the timing. However it should give a good
1627 approximation of which revision pairs are very costly.
1761 approximation of which revision pairs are very costly.
1628 """
1762 """
1629 opts = _byteskwargs(opts)
1763 opts = _byteskwargs(opts)
1630 fm = ui.formatter(b'perf', opts)
1764 fm = ui.formatter(b'perf', opts)
1631 dotiming = opts[b'timing']
1765 dotiming = opts[b'timing']
1632
1766
1633 if dotiming:
1767 if dotiming:
1634 header = '%12s %12s %12s %12s %12s %12s\n'
1768 header = '%12s %12s %12s %12s %12s %12s\n'
1635 output = ("%(source)12s %(destination)12s "
1769 output = ("%(source)12s %(destination)12s "
1636 "%(nbrevs)12d %(nbmissingfiles)12d "
1770 "%(nbrevs)12d %(nbmissingfiles)12d "
1637 "%(nbrenamedfiles)12d %(time)18.5f\n")
1771 "%(nbrenamedfiles)12d %(time)18.5f\n")
1638 header_names = ("source", "destination", "nb-revs", "nb-files",
1772 header_names = ("source", "destination", "nb-revs", "nb-files",
1639 "nb-renames", "time")
1773 "nb-renames", "time")
1640 fm.plain(header % header_names)
1774 fm.plain(header % header_names)
1641 else:
1775 else:
1642 header = '%12s %12s %12s %12s\n'
1776 header = '%12s %12s %12s %12s\n'
1643 output = ("%(source)12s %(destination)12s "
1777 output = ("%(source)12s %(destination)12s "
1644 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1778 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1645 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1779 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1646
1780
1647 if not revs:
1781 if not revs:
1648 revs = ['all()']
1782 revs = ['all()']
1649 revs = scmutil.revrange(repo, revs)
1783 revs = scmutil.revrange(repo, revs)
1650
1784
1651 roi = repo.revs('merge() and %ld', revs)
1785 roi = repo.revs('merge() and %ld', revs)
1652 for r in roi:
1786 for r in roi:
1653 ctx = repo[r]
1787 ctx = repo[r]
1654 p1 = ctx.p1().rev()
1788 p1 = ctx.p1().rev()
1655 p2 = ctx.p2().rev()
1789 p2 = ctx.p2().rev()
1656 bases = repo.changelog._commonancestorsheads(p1, p2)
1790 bases = repo.changelog._commonancestorsheads(p1, p2)
1657 for p in (p1, p2):
1791 for p in (p1, p2):
1658 for b in bases:
1792 for b in bases:
1659 base = repo[b]
1793 base = repo[b]
1660 parent = repo[p]
1794 parent = repo[p]
1661 missing = copies._computeforwardmissing(base, parent)
1795 missing = copies._computeforwardmissing(base, parent)
1662 if not missing:
1796 if not missing:
1663 continue
1797 continue
1664 data = {
1798 data = {
1665 b'source': base.hex(),
1799 b'source': base.hex(),
1666 b'destination': parent.hex(),
1800 b'destination': parent.hex(),
1667 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1801 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1668 b'nbmissingfiles': len(missing),
1802 b'nbmissingfiles': len(missing),
1669 }
1803 }
1670 if dotiming:
1804 if dotiming:
1671 begin = util.timer()
1805 begin = util.timer()
1672 renames = copies.pathcopies(base, parent)
1806 renames = copies.pathcopies(base, parent)
1673 end = util.timer()
1807 end = util.timer()
1674 # not very stable timing since we did only one run
1808 # not very stable timing since we did only one run
1675 data['time'] = end - begin
1809 data['time'] = end - begin
1676 data['nbrenamedfiles'] = len(renames)
1810 data['nbrenamedfiles'] = len(renames)
1677 fm.startitem()
1811 fm.startitem()
1678 fm.data(**data)
1812 fm.data(**data)
1679 out = data.copy()
1813 out = data.copy()
1680 out['source'] = fm.hexfunc(base.node())
1814 out['source'] = fm.hexfunc(base.node())
1681 out['destination'] = fm.hexfunc(parent.node())
1815 out['destination'] = fm.hexfunc(parent.node())
1682 fm.plain(output % out)
1816 fm.plain(output % out)
1683
1817
1684 fm.end()
1818 fm.end()
1685
1819
1686 @command(b'perfcca', formatteropts)
1820 @command(b'perfcca', formatteropts)
1687 def perfcca(ui, repo, **opts):
1821 def perfcca(ui, repo, **opts):
1688 opts = _byteskwargs(opts)
1822 opts = _byteskwargs(opts)
1689 timer, fm = gettimer(ui, opts)
1823 timer, fm = gettimer(ui, opts)
1690 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1824 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1691 fm.end()
1825 fm.end()
1692
1826
1693 @command(b'perffncacheload', formatteropts)
1827 @command(b'perffncacheload', formatteropts)
1694 def perffncacheload(ui, repo, **opts):
1828 def perffncacheload(ui, repo, **opts):
1695 opts = _byteskwargs(opts)
1829 opts = _byteskwargs(opts)
1696 timer, fm = gettimer(ui, opts)
1830 timer, fm = gettimer(ui, opts)
1697 s = repo.store
1831 s = repo.store
1698 def d():
1832 def d():
1699 s.fncache._load()
1833 s.fncache._load()
1700 timer(d)
1834 timer(d)
1701 fm.end()
1835 fm.end()
1702
1836
1703 @command(b'perffncachewrite', formatteropts)
1837 @command(b'perffncachewrite', formatteropts)
1704 def perffncachewrite(ui, repo, **opts):
1838 def perffncachewrite(ui, repo, **opts):
1705 opts = _byteskwargs(opts)
1839 opts = _byteskwargs(opts)
1706 timer, fm = gettimer(ui, opts)
1840 timer, fm = gettimer(ui, opts)
1707 s = repo.store
1841 s = repo.store
1708 lock = repo.lock()
1842 lock = repo.lock()
1709 s.fncache._load()
1843 s.fncache._load()
1710 tr = repo.transaction(b'perffncachewrite')
1844 tr = repo.transaction(b'perffncachewrite')
1711 tr.addbackup(b'fncache')
1845 tr.addbackup(b'fncache')
1712 def d():
1846 def d():
1713 s.fncache._dirty = True
1847 s.fncache._dirty = True
1714 s.fncache.write(tr)
1848 s.fncache.write(tr)
1715 timer(d)
1849 timer(d)
1716 tr.close()
1850 tr.close()
1717 lock.release()
1851 lock.release()
1718 fm.end()
1852 fm.end()
1719
1853
1720 @command(b'perffncacheencode', formatteropts)
1854 @command(b'perffncacheencode', formatteropts)
1721 def perffncacheencode(ui, repo, **opts):
1855 def perffncacheencode(ui, repo, **opts):
1722 opts = _byteskwargs(opts)
1856 opts = _byteskwargs(opts)
1723 timer, fm = gettimer(ui, opts)
1857 timer, fm = gettimer(ui, opts)
1724 s = repo.store
1858 s = repo.store
1725 s.fncache._load()
1859 s.fncache._load()
1726 def d():
1860 def d():
1727 for p in s.fncache.entries:
1861 for p in s.fncache.entries:
1728 s.encode(p)
1862 s.encode(p)
1729 timer(d)
1863 timer(d)
1730 fm.end()
1864 fm.end()
1731
1865
1732 def _bdiffworker(q, blocks, xdiff, ready, done):
1866 def _bdiffworker(q, blocks, xdiff, ready, done):
1733 while not done.is_set():
1867 while not done.is_set():
1734 pair = q.get()
1868 pair = q.get()
1735 while pair is not None:
1869 while pair is not None:
1736 if xdiff:
1870 if xdiff:
1737 mdiff.bdiff.xdiffblocks(*pair)
1871 mdiff.bdiff.xdiffblocks(*pair)
1738 elif blocks:
1872 elif blocks:
1739 mdiff.bdiff.blocks(*pair)
1873 mdiff.bdiff.blocks(*pair)
1740 else:
1874 else:
1741 mdiff.textdiff(*pair)
1875 mdiff.textdiff(*pair)
1742 q.task_done()
1876 q.task_done()
1743 pair = q.get()
1877 pair = q.get()
1744 q.task_done() # for the None one
1878 q.task_done() # for the None one
1745 with ready:
1879 with ready:
1746 ready.wait()
1880 ready.wait()
1747
1881
1748 def _manifestrevision(repo, mnode):
1882 def _manifestrevision(repo, mnode):
1749 ml = repo.manifestlog
1883 ml = repo.manifestlog
1750
1884
1751 if util.safehasattr(ml, b'getstorage'):
1885 if util.safehasattr(ml, b'getstorage'):
1752 store = ml.getstorage(b'')
1886 store = ml.getstorage(b'')
1753 else:
1887 else:
1754 store = ml._revlog
1888 store = ml._revlog
1755
1889
1756 return store.revision(mnode)
1890 return store.revision(mnode)
1757
1891
1758 @command(b'perfbdiff', revlogopts + formatteropts + [
1892 @command(b'perfbdiff', revlogopts + formatteropts + [
1759 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1893 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1760 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1894 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1761 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
1895 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
1762 (b'', b'blocks', False, b'test computing diffs into blocks'),
1896 (b'', b'blocks', False, b'test computing diffs into blocks'),
1763 (b'', b'xdiff', False, b'use xdiff algorithm'),
1897 (b'', b'xdiff', False, b'use xdiff algorithm'),
1764 ],
1898 ],
1765
1899
1766 b'-c|-m|FILE REV')
1900 b'-c|-m|FILE REV')
1767 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1901 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1768 """benchmark a bdiff between revisions
1902 """benchmark a bdiff between revisions
1769
1903
1770 By default, benchmark a bdiff between its delta parent and itself.
1904 By default, benchmark a bdiff between its delta parent and itself.
1771
1905
1772 With ``--count``, benchmark bdiffs between delta parents and self for N
1906 With ``--count``, benchmark bdiffs between delta parents and self for N
1773 revisions starting at the specified revision.
1907 revisions starting at the specified revision.
1774
1908
1775 With ``--alldata``, assume the requested revision is a changeset and
1909 With ``--alldata``, assume the requested revision is a changeset and
1776 measure bdiffs for all changes related to that changeset (manifest
1910 measure bdiffs for all changes related to that changeset (manifest
1777 and filelogs).
1911 and filelogs).
1778 """
1912 """
1779 opts = _byteskwargs(opts)
1913 opts = _byteskwargs(opts)
1780
1914
1781 if opts[b'xdiff'] and not opts[b'blocks']:
1915 if opts[b'xdiff'] and not opts[b'blocks']:
1782 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1916 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1783
1917
1784 if opts[b'alldata']:
1918 if opts[b'alldata']:
1785 opts[b'changelog'] = True
1919 opts[b'changelog'] = True
1786
1920
1787 if opts.get(b'changelog') or opts.get(b'manifest'):
1921 if opts.get(b'changelog') or opts.get(b'manifest'):
1788 file_, rev = None, file_
1922 file_, rev = None, file_
1789 elif rev is None:
1923 elif rev is None:
1790 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1924 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1791
1925
1792 blocks = opts[b'blocks']
1926 blocks = opts[b'blocks']
1793 xdiff = opts[b'xdiff']
1927 xdiff = opts[b'xdiff']
1794 textpairs = []
1928 textpairs = []
1795
1929
1796 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1930 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1797
1931
1798 startrev = r.rev(r.lookup(rev))
1932 startrev = r.rev(r.lookup(rev))
1799 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1933 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1800 if opts[b'alldata']:
1934 if opts[b'alldata']:
1801 # Load revisions associated with changeset.
1935 # Load revisions associated with changeset.
1802 ctx = repo[rev]
1936 ctx = repo[rev]
1803 mtext = _manifestrevision(repo, ctx.manifestnode())
1937 mtext = _manifestrevision(repo, ctx.manifestnode())
1804 for pctx in ctx.parents():
1938 for pctx in ctx.parents():
1805 pman = _manifestrevision(repo, pctx.manifestnode())
1939 pman = _manifestrevision(repo, pctx.manifestnode())
1806 textpairs.append((pman, mtext))
1940 textpairs.append((pman, mtext))
1807
1941
1808 # Load filelog revisions by iterating manifest delta.
1942 # Load filelog revisions by iterating manifest delta.
1809 man = ctx.manifest()
1943 man = ctx.manifest()
1810 pman = ctx.p1().manifest()
1944 pman = ctx.p1().manifest()
1811 for filename, change in pman.diff(man).items():
1945 for filename, change in pman.diff(man).items():
1812 fctx = repo.file(filename)
1946 fctx = repo.file(filename)
1813 f1 = fctx.revision(change[0][0] or -1)
1947 f1 = fctx.revision(change[0][0] or -1)
1814 f2 = fctx.revision(change[1][0] or -1)
1948 f2 = fctx.revision(change[1][0] or -1)
1815 textpairs.append((f1, f2))
1949 textpairs.append((f1, f2))
1816 else:
1950 else:
1817 dp = r.deltaparent(rev)
1951 dp = r.deltaparent(rev)
1818 textpairs.append((r.revision(dp), r.revision(rev)))
1952 textpairs.append((r.revision(dp), r.revision(rev)))
1819
1953
1820 withthreads = threads > 0
1954 withthreads = threads > 0
1821 if not withthreads:
1955 if not withthreads:
1822 def d():
1956 def d():
1823 for pair in textpairs:
1957 for pair in textpairs:
1824 if xdiff:
1958 if xdiff:
1825 mdiff.bdiff.xdiffblocks(*pair)
1959 mdiff.bdiff.xdiffblocks(*pair)
1826 elif blocks:
1960 elif blocks:
1827 mdiff.bdiff.blocks(*pair)
1961 mdiff.bdiff.blocks(*pair)
1828 else:
1962 else:
1829 mdiff.textdiff(*pair)
1963 mdiff.textdiff(*pair)
1830 else:
1964 else:
1831 q = queue()
1965 q = queue()
1832 for i in _xrange(threads):
1966 for i in _xrange(threads):
1833 q.put(None)
1967 q.put(None)
1834 ready = threading.Condition()
1968 ready = threading.Condition()
1835 done = threading.Event()
1969 done = threading.Event()
1836 for i in _xrange(threads):
1970 for i in _xrange(threads):
1837 threading.Thread(target=_bdiffworker,
1971 threading.Thread(target=_bdiffworker,
1838 args=(q, blocks, xdiff, ready, done)).start()
1972 args=(q, blocks, xdiff, ready, done)).start()
1839 q.join()
1973 q.join()
1840 def d():
1974 def d():
1841 for pair in textpairs:
1975 for pair in textpairs:
1842 q.put(pair)
1976 q.put(pair)
1843 for i in _xrange(threads):
1977 for i in _xrange(threads):
1844 q.put(None)
1978 q.put(None)
1845 with ready:
1979 with ready:
1846 ready.notify_all()
1980 ready.notify_all()
1847 q.join()
1981 q.join()
1848 timer, fm = gettimer(ui, opts)
1982 timer, fm = gettimer(ui, opts)
1849 timer(d)
1983 timer(d)
1850 fm.end()
1984 fm.end()
1851
1985
1852 if withthreads:
1986 if withthreads:
1853 done.set()
1987 done.set()
1854 for i in _xrange(threads):
1988 for i in _xrange(threads):
1855 q.put(None)
1989 q.put(None)
1856 with ready:
1990 with ready:
1857 ready.notify_all()
1991 ready.notify_all()
1858
1992
1859 @command(b'perfunidiff', revlogopts + formatteropts + [
1993 @command(b'perfunidiff', revlogopts + formatteropts + [
1860 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1994 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1861 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1995 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1862 ], b'-c|-m|FILE REV')
1996 ], b'-c|-m|FILE REV')
1863 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1997 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1864 """benchmark a unified diff between revisions
1998 """benchmark a unified diff between revisions
1865
1999
1866 This doesn't include any copy tracing - it's just a unified diff
2000 This doesn't include any copy tracing - it's just a unified diff
1867 of the texts.
2001 of the texts.
1868
2002
1869 By default, benchmark a diff between its delta parent and itself.
2003 By default, benchmark a diff between its delta parent and itself.
1870
2004
1871 With ``--count``, benchmark diffs between delta parents and self for N
2005 With ``--count``, benchmark diffs between delta parents and self for N
1872 revisions starting at the specified revision.
2006 revisions starting at the specified revision.
1873
2007
1874 With ``--alldata``, assume the requested revision is a changeset and
2008 With ``--alldata``, assume the requested revision is a changeset and
1875 measure diffs for all changes related to that changeset (manifest
2009 measure diffs for all changes related to that changeset (manifest
1876 and filelogs).
2010 and filelogs).
1877 """
2011 """
1878 opts = _byteskwargs(opts)
2012 opts = _byteskwargs(opts)
1879 if opts[b'alldata']:
2013 if opts[b'alldata']:
1880 opts[b'changelog'] = True
2014 opts[b'changelog'] = True
1881
2015
1882 if opts.get(b'changelog') or opts.get(b'manifest'):
2016 if opts.get(b'changelog') or opts.get(b'manifest'):
1883 file_, rev = None, file_
2017 file_, rev = None, file_
1884 elif rev is None:
2018 elif rev is None:
1885 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2019 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1886
2020
1887 textpairs = []
2021 textpairs = []
1888
2022
1889 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2023 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1890
2024
1891 startrev = r.rev(r.lookup(rev))
2025 startrev = r.rev(r.lookup(rev))
1892 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2026 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1893 if opts[b'alldata']:
2027 if opts[b'alldata']:
1894 # Load revisions associated with changeset.
2028 # Load revisions associated with changeset.
1895 ctx = repo[rev]
2029 ctx = repo[rev]
1896 mtext = _manifestrevision(repo, ctx.manifestnode())
2030 mtext = _manifestrevision(repo, ctx.manifestnode())
1897 for pctx in ctx.parents():
2031 for pctx in ctx.parents():
1898 pman = _manifestrevision(repo, pctx.manifestnode())
2032 pman = _manifestrevision(repo, pctx.manifestnode())
1899 textpairs.append((pman, mtext))
2033 textpairs.append((pman, mtext))
1900
2034
1901 # Load filelog revisions by iterating manifest delta.
2035 # Load filelog revisions by iterating manifest delta.
1902 man = ctx.manifest()
2036 man = ctx.manifest()
1903 pman = ctx.p1().manifest()
2037 pman = ctx.p1().manifest()
1904 for filename, change in pman.diff(man).items():
2038 for filename, change in pman.diff(man).items():
1905 fctx = repo.file(filename)
2039 fctx = repo.file(filename)
1906 f1 = fctx.revision(change[0][0] or -1)
2040 f1 = fctx.revision(change[0][0] or -1)
1907 f2 = fctx.revision(change[1][0] or -1)
2041 f2 = fctx.revision(change[1][0] or -1)
1908 textpairs.append((f1, f2))
2042 textpairs.append((f1, f2))
1909 else:
2043 else:
1910 dp = r.deltaparent(rev)
2044 dp = r.deltaparent(rev)
1911 textpairs.append((r.revision(dp), r.revision(rev)))
2045 textpairs.append((r.revision(dp), r.revision(rev)))
1912
2046
1913 def d():
2047 def d():
1914 for left, right in textpairs:
2048 for left, right in textpairs:
1915 # The date strings don't matter, so we pass empty strings.
2049 # The date strings don't matter, so we pass empty strings.
1916 headerlines, hunks = mdiff.unidiff(
2050 headerlines, hunks = mdiff.unidiff(
1917 left, b'', right, b'', b'left', b'right', binary=False)
2051 left, b'', right, b'', b'left', b'right', binary=False)
1918 # consume iterators in roughly the way patch.py does
2052 # consume iterators in roughly the way patch.py does
1919 b'\n'.join(headerlines)
2053 b'\n'.join(headerlines)
1920 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2054 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1921 timer, fm = gettimer(ui, opts)
2055 timer, fm = gettimer(ui, opts)
1922 timer(d)
2056 timer(d)
1923 fm.end()
2057 fm.end()
1924
2058
1925 @command(b'perfdiffwd', formatteropts)
2059 @command(b'perfdiffwd', formatteropts)
1926 def perfdiffwd(ui, repo, **opts):
2060 def perfdiffwd(ui, repo, **opts):
1927 """Profile diff of working directory changes"""
2061 """Profile diff of working directory changes"""
1928 opts = _byteskwargs(opts)
2062 opts = _byteskwargs(opts)
1929 timer, fm = gettimer(ui, opts)
2063 timer, fm = gettimer(ui, opts)
1930 options = {
2064 options = {
1931 'w': 'ignore_all_space',
2065 'w': 'ignore_all_space',
1932 'b': 'ignore_space_change',
2066 'b': 'ignore_space_change',
1933 'B': 'ignore_blank_lines',
2067 'B': 'ignore_blank_lines',
1934 }
2068 }
1935
2069
1936 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2070 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1937 opts = dict((options[c], b'1') for c in diffopt)
2071 opts = dict((options[c], b'1') for c in diffopt)
1938 def d():
2072 def d():
1939 ui.pushbuffer()
2073 ui.pushbuffer()
1940 commands.diff(ui, repo, **opts)
2074 commands.diff(ui, repo, **opts)
1941 ui.popbuffer()
2075 ui.popbuffer()
1942 diffopt = diffopt.encode('ascii')
2076 diffopt = diffopt.encode('ascii')
1943 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2077 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1944 timer(d, title=title)
2078 timer(d, title=title)
1945 fm.end()
2079 fm.end()
1946
2080
1947 @command(b'perfrevlogindex', revlogopts + formatteropts,
2081 @command(b'perfrevlogindex', revlogopts + formatteropts,
1948 b'-c|-m|FILE')
2082 b'-c|-m|FILE')
1949 def perfrevlogindex(ui, repo, file_=None, **opts):
2083 def perfrevlogindex(ui, repo, file_=None, **opts):
1950 """Benchmark operations against a revlog index.
2084 """Benchmark operations against a revlog index.
1951
2085
1952 This tests constructing a revlog instance, reading index data,
2086 This tests constructing a revlog instance, reading index data,
1953 parsing index data, and performing various operations related to
2087 parsing index data, and performing various operations related to
1954 index data.
2088 index data.
1955 """
2089 """
1956
2090
1957 opts = _byteskwargs(opts)
2091 opts = _byteskwargs(opts)
1958
2092
1959 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2093 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1960
2094
1961 opener = getattr(rl, 'opener') # trick linter
2095 opener = getattr(rl, 'opener') # trick linter
1962 indexfile = rl.indexfile
2096 indexfile = rl.indexfile
1963 data = opener.read(indexfile)
2097 data = opener.read(indexfile)
1964
2098
1965 header = struct.unpack(b'>I', data[0:4])[0]
2099 header = struct.unpack(b'>I', data[0:4])[0]
1966 version = header & 0xFFFF
2100 version = header & 0xFFFF
1967 if version == 1:
2101 if version == 1:
1968 revlogio = revlog.revlogio()
2102 revlogio = revlog.revlogio()
1969 inline = header & (1 << 16)
2103 inline = header & (1 << 16)
1970 else:
2104 else:
1971 raise error.Abort((b'unsupported revlog version: %d') % version)
2105 raise error.Abort((b'unsupported revlog version: %d') % version)
1972
2106
1973 rllen = len(rl)
2107 rllen = len(rl)
1974
2108
1975 node0 = rl.node(0)
2109 node0 = rl.node(0)
1976 node25 = rl.node(rllen // 4)
2110 node25 = rl.node(rllen // 4)
1977 node50 = rl.node(rllen // 2)
2111 node50 = rl.node(rllen // 2)
1978 node75 = rl.node(rllen // 4 * 3)
2112 node75 = rl.node(rllen // 4 * 3)
1979 node100 = rl.node(rllen - 1)
2113 node100 = rl.node(rllen - 1)
1980
2114
1981 allrevs = range(rllen)
2115 allrevs = range(rllen)
1982 allrevsrev = list(reversed(allrevs))
2116 allrevsrev = list(reversed(allrevs))
1983 allnodes = [rl.node(rev) for rev in range(rllen)]
2117 allnodes = [rl.node(rev) for rev in range(rllen)]
1984 allnodesrev = list(reversed(allnodes))
2118 allnodesrev = list(reversed(allnodes))
1985
2119
1986 def constructor():
2120 def constructor():
1987 revlog.revlog(opener, indexfile)
2121 revlog.revlog(opener, indexfile)
1988
2122
1989 def read():
2123 def read():
1990 with opener(indexfile) as fh:
2124 with opener(indexfile) as fh:
1991 fh.read()
2125 fh.read()
1992
2126
1993 def parseindex():
2127 def parseindex():
1994 revlogio.parseindex(data, inline)
2128 revlogio.parseindex(data, inline)
1995
2129
1996 def getentry(revornode):
2130 def getentry(revornode):
1997 index = revlogio.parseindex(data, inline)[0]
2131 index = revlogio.parseindex(data, inline)[0]
1998 index[revornode]
2132 index[revornode]
1999
2133
2000 def getentries(revs, count=1):
2134 def getentries(revs, count=1):
2001 index = revlogio.parseindex(data, inline)[0]
2135 index = revlogio.parseindex(data, inline)[0]
2002
2136
2003 for i in range(count):
2137 for i in range(count):
2004 for rev in revs:
2138 for rev in revs:
2005 index[rev]
2139 index[rev]
2006
2140
2007 def resolvenode(node):
2141 def resolvenode(node):
2008 nodemap = revlogio.parseindex(data, inline)[1]
2142 nodemap = revlogio.parseindex(data, inline)[1]
2009 # This only works for the C code.
2143 # This only works for the C code.
2010 if nodemap is None:
2144 if nodemap is None:
2011 return
2145 return
2012
2146
2013 try:
2147 try:
2014 nodemap[node]
2148 nodemap[node]
2015 except error.RevlogError:
2149 except error.RevlogError:
2016 pass
2150 pass
2017
2151
2018 def resolvenodes(nodes, count=1):
2152 def resolvenodes(nodes, count=1):
2019 nodemap = revlogio.parseindex(data, inline)[1]
2153 nodemap = revlogio.parseindex(data, inline)[1]
2020 if nodemap is None:
2154 if nodemap is None:
2021 return
2155 return
2022
2156
2023 for i in range(count):
2157 for i in range(count):
2024 for node in nodes:
2158 for node in nodes:
2025 try:
2159 try:
2026 nodemap[node]
2160 nodemap[node]
2027 except error.RevlogError:
2161 except error.RevlogError:
2028 pass
2162 pass
2029
2163
2030 benches = [
2164 benches = [
2031 (constructor, b'revlog constructor'),
2165 (constructor, b'revlog constructor'),
2032 (read, b'read'),
2166 (read, b'read'),
2033 (parseindex, b'create index object'),
2167 (parseindex, b'create index object'),
2034 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2168 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2035 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2169 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2036 (lambda: resolvenode(node0), b'look up node at rev 0'),
2170 (lambda: resolvenode(node0), b'look up node at rev 0'),
2037 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2171 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2038 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2172 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2039 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2173 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2040 (lambda: resolvenode(node100), b'look up node at tip'),
2174 (lambda: resolvenode(node100), b'look up node at tip'),
2041 # 2x variation is to measure caching impact.
2175 # 2x variation is to measure caching impact.
2042 (lambda: resolvenodes(allnodes),
2176 (lambda: resolvenodes(allnodes),
2043 b'look up all nodes (forward)'),
2177 b'look up all nodes (forward)'),
2044 (lambda: resolvenodes(allnodes, 2),
2178 (lambda: resolvenodes(allnodes, 2),
2045 b'look up all nodes 2x (forward)'),
2179 b'look up all nodes 2x (forward)'),
2046 (lambda: resolvenodes(allnodesrev),
2180 (lambda: resolvenodes(allnodesrev),
2047 b'look up all nodes (reverse)'),
2181 b'look up all nodes (reverse)'),
2048 (lambda: resolvenodes(allnodesrev, 2),
2182 (lambda: resolvenodes(allnodesrev, 2),
2049 b'look up all nodes 2x (reverse)'),
2183 b'look up all nodes 2x (reverse)'),
2050 (lambda: getentries(allrevs),
2184 (lambda: getentries(allrevs),
2051 b'retrieve all index entries (forward)'),
2185 b'retrieve all index entries (forward)'),
2052 (lambda: getentries(allrevs, 2),
2186 (lambda: getentries(allrevs, 2),
2053 b'retrieve all index entries 2x (forward)'),
2187 b'retrieve all index entries 2x (forward)'),
2054 (lambda: getentries(allrevsrev),
2188 (lambda: getentries(allrevsrev),
2055 b'retrieve all index entries (reverse)'),
2189 b'retrieve all index entries (reverse)'),
2056 (lambda: getentries(allrevsrev, 2),
2190 (lambda: getentries(allrevsrev, 2),
2057 b'retrieve all index entries 2x (reverse)'),
2191 b'retrieve all index entries 2x (reverse)'),
2058 ]
2192 ]
2059
2193
2060 for fn, title in benches:
2194 for fn, title in benches:
2061 timer, fm = gettimer(ui, opts)
2195 timer, fm = gettimer(ui, opts)
2062 timer(fn, title=title)
2196 timer(fn, title=title)
2063 fm.end()
2197 fm.end()
2064
2198
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts from the end, as for list indexing
    if startrev < 0:
        startrev += rllen

    def readrevs():
        # drop any cached state so every run pays the full read cost
        rl.clearcaches()

        step = opts[b'dist']
        if reverse:
            # walk from tip down to (and excluding) startrev - 1
            first, last, step = rllen - 1, startrev - 1, -step
        else:
            first, last = startrev, rllen

        for rev in _xrange(first, last, step):
            # Old revlog versions don't support passing an int directly.
            rl.revision(rl.node(rev))

    timer, fm = gettimer(ui, opts)
    timer(readrevs)
    fm.end()
2106
2240
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
          ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative boundaries count from the end of the revlog, as for lists
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    # NOTE(review): these lookups use native-str keys even though
    # _byteskwargs() was applied above -- confirm they resolve on Python 3.
    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # message typo fixed: 'invalide' -> 'invalid'
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    # transpose the per-pass lists into one (rev, [timing, ...]) per revision
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            # every pass rewrites the same revision range in the same order
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # BUG FIX: the median row was computed as `resultcount * 70 // 100`,
        # which displayed the 70th percentile under the "50%" label.
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
2224
2358
2225 class _faketr(object):
2359 class _faketr(object):
2226 def add(s, x, y, z=None):
2360 def add(s, x, y, z=None):
2227 return None
2361 return None
2228
2362
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    """Re-add revisions [startrev, stoprev] of `orig` into a temporary
    revlog copy and time each individual addrawrevision() call.

    `source` selects how the revision data is fed (see perfrevlogwrite),
    `runidx` is only used to label the progress bar for multi-pass runs.

    Returns a list of (rev, timing) pairs, one per rewritten revision,
    where `timing` is whatever timeone() recorded for that write.
    """
    timings = []
    # a fake transaction is enough: addrawrevision only calls tr.add()
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            def updateprogress(pos):
                progress.update(pos)
            def completeprogress():
                progress.complete()
        else:
            # legacy API: progress is driven through ui.progress() directly
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            # build the args outside the timed section: only the write counts
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
2265
2399
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) pair to feed addrawrevision() for `rev`.

    Depending on `source`, the revision content is provided either as a
    full text or as a cached delta against some base revision of `orig`.
    """
    from mercurial.node import nullid

    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    text = None
    cachedelta = None

    if source == b'full':
        # hand over the complete fulltext; the target recomputes its delta
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second one
        base = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(base), orig.revdiff(base, rev))
    elif source == b'parent-smallest':
        # compare both parent deltas and keep the shortest; ties go to p1
        candidates = [(p1, orig.revdiff(p1, rev))]
        if p2 != nullid:
            candidates.append((p2, orig.revdiff(p2, rev)))
        base, diff = min(candidates, key=lambda entry: len(entry[1]))
        cachedelta = (orig.rev(base), diff)
    elif source == b'storage':
        # reuse the delta exactly as stored in the source revlog
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, orig.linkrev(rev), p1, p2),
            {'node': node, 'flags': orig.flags(rev), 'cachedelta': cachedelta})
2304
2438
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Context manager yielding a writable copy of revlog `orig`,
    truncated so that revisions >= `truncaterev` can be re-added.

    The copy lives in a temporary directory that is removed on exit.
    Inline revlogs are rejected because index and data share one file,
    which the two-file truncation below cannot handle.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    # forward upperboundcomp when the revlog implementation supports it
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # each index entry has fixed size _io.size, so this cuts the
            # index right before entry `truncaterev`
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname, **revlogkwargs)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        # best-effort cleanup (ignore_errors=True): never mask the benchmark
        shutil.rmtree(tmpdir, True)
2355
2489
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        # explicit comma-separated list: every name must be a known engine
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # default: every available engine that can actually compress;
        # the b'dummy' probe weeds out decompression-only engines
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # open the file that actually holds the chunk data: the index file
        # for inline revlogs, the separate data file otherwise
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # same as doread() but reusing one file descriptor for all reads
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # one segment request covering the whole revision range
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        # read + decompress each chunk individually
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        # NOTE: relies on dochunkbatch() having filled chunks[0]; the
        # benches list below runs it before any compression benchmark.
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    # append one compression benchmark per selected engine
    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2473
2607
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # With -c/-m the positional argument is actually the revision, not a file.
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # Slice the segment buffers in `data` into one raw (still
        # compressed) chunk per revision of the delta chain.
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        # step 1: delta chain computation only
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        # step 3 (I/O): read the segments for the sliced chain.
        # NOTE(review): the `chain` argument is unused; the closure iterates
        # `slicedchain` from the enclosing scope instead.
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        # step 2: chain slicing (sparse-read only)
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        # step 3 (buffer splitting)
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        # step 4: decompression of every raw chunk
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        # step 5: binary patching to rebuild the fulltext
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        # step 6: hash verification
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        # all steps combined through the public API
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved between modules across Mercurial versions.
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # Precompute every intermediate product once so each benchmark can
    # time a single phase in isolation.
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2609
2743
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building the
    volatile revision set caches on the revset execution. Volatile caches hold
    filtering and obsolescence related data."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            # drop volatile caches so each run recomputes them
            repo.invalidatevolatilesets()
        if contexts:
            # materialize a full changectx per revision (heavier path)
            for ctx in repo.set(expr):
                pass
        else:
            # iterate raw revision numbers only (lighter path)
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
2632
2766
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def mktimefunc(compute, setname):
        """Build a benchmark closure recomputing one volatile set per run."""
        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            compute(repo, setname)
        return run

    # obsolescence-related sets first, filtered down to the requested names
    wantedobs = sorted(obsolete.cachefuncs)
    if names:
        wantedobs = [n for n in wantedobs if n in names]
    for setname in wantedobs:
        timer(mktimefunc(obsolete.getrevs, setname), title=setname)

    # then the repoview filter sets, with the same name restriction
    wantedfilters = sorted(repoview.filtertable)
    if names:
        wantedfilters = [n for n in wantedfilters if n in names]
    for setname in wantedfilters:
        timer(mktimefunc(repoview.filterrevs, setname), title=setname)
    fm.end()
2674
2808
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
          ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap: full rebuild is timed
                view._branchcaches.clear()
            else:
                # only drop this view's entry so subsets can be reused
                filtered.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset is not itself pending, so allfilters
        # ends up ordered from smallest subset to biggest
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            # for/else: no candidate found means the subset graph has a cycle
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # Disable on-disk branchmap reads and writes for the duration of the
    # benchmark so only in-memory computation is measured.
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        # always restore the patched read/write entry points
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2753
2887
@command(b'perfbranchmapupdate', [
    (b'', b'base', [], b'subset of revision to start from'),
    (b'', b'target', [], b'subset of revision to end with'),
    (b'', b'clear-caches', False, b'clear cache between each runs')
    ] + formatteropts)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the revisions the timed update will have to process
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        # repoview filter exposing exactly the --base ancestors
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        # repoview filter exposing exactly the --target ancestors
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # temporarily register the two synthetic filters
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset where found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # start every run from a pristine copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        # always unregister the synthetic filters
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2857
2991
@command(b'perfbranchmapload', [
    (b'f', b'filter', b'', b'Specify repoview filter'),
    (b'', b'list', False, b'List brachmap filter caches'),
    (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),

    ] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # --list: just enumerate the on-disk branchmap cache files and exit
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    # branchcache.fromfile replaced branchmap.read in newer versions
    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        # walk up the subset chain until a cached branchmap is found
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        fromfile(repo)
    timer(bench, setup=setup)
    fm.end()
2908
3042
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    storevfs = getsvfs(repo)

    def countmarkers():
        # parsing happens lazily: len() forces a full read of the obsstore
        return len(obsolete.obsstore(storevfs))

    timer(countmarkers)
    fm.end()
2918
3052
2919 @command(b'perflrucachedict', formatteropts +
3053 @command(b'perflrucachedict', formatteropts +
2920 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3054 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2921 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3055 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2922 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3056 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2923 (b'', b'size', 4, b'size of cache'),
3057 (b'', b'size', 4, b'size of cache'),
2924 (b'', b'gets', 10000, b'number of key lookups'),
3058 (b'', b'gets', 10000, b'number of key lookups'),
2925 (b'', b'sets', 10000, b'number of key sets'),
3059 (b'', b'sets', 10000, b'number of key sets'),
2926 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3060 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2927 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
3061 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2928 norepo=True)
3062 norepo=True)
2929 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
3063 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2930 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
3064 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2931 opts = _byteskwargs(opts)
3065 opts = _byteskwargs(opts)
2932
3066
2933 def doinit():
3067 def doinit():
2934 for i in _xrange(10000):
3068 for i in _xrange(10000):
2935 util.lrucachedict(size)
3069 util.lrucachedict(size)
2936
3070
2937 costrange = list(range(mincost, maxcost + 1))
3071 costrange = list(range(mincost, maxcost + 1))
2938
3072
2939 values = []
3073 values = []
2940 for i in _xrange(size):
3074 for i in _xrange(size):
2941 values.append(random.randint(0, _maxint))
3075 values.append(random.randint(0, _maxint))
2942
3076
2943 # Get mode fills the cache and tests raw lookup performance with no
3077 # Get mode fills the cache and tests raw lookup performance with no
2944 # eviction.
3078 # eviction.
2945 getseq = []
3079 getseq = []
2946 for i in _xrange(gets):
3080 for i in _xrange(gets):
2947 getseq.append(random.choice(values))
3081 getseq.append(random.choice(values))
2948
3082
2949 def dogets():
3083 def dogets():
2950 d = util.lrucachedict(size)
3084 d = util.lrucachedict(size)
2951 for v in values:
3085 for v in values:
2952 d[v] = v
3086 d[v] = v
2953 for key in getseq:
3087 for key in getseq:
2954 value = d[key]
3088 value = d[key]
2955 value # silence pyflakes warning
3089 value # silence pyflakes warning
2956
3090
2957 def dogetscost():
3091 def dogetscost():
2958 d = util.lrucachedict(size, maxcost=costlimit)
3092 d = util.lrucachedict(size, maxcost=costlimit)
2959 for i, v in enumerate(values):
3093 for i, v in enumerate(values):
2960 d.insert(v, v, cost=costs[i])
3094 d.insert(v, v, cost=costs[i])
2961 for key in getseq:
3095 for key in getseq:
2962 try:
3096 try:
2963 value = d[key]
3097 value = d[key]
2964 value # silence pyflakes warning
3098 value # silence pyflakes warning
2965 except KeyError:
3099 except KeyError:
2966 pass
3100 pass
2967
3101
2968 # Set mode tests insertion speed with cache eviction.
3102 # Set mode tests insertion speed with cache eviction.
2969 setseq = []
3103 setseq = []
2970 costs = []
3104 costs = []
2971 for i in _xrange(sets):
3105 for i in _xrange(sets):
2972 setseq.append(random.randint(0, _maxint))
3106 setseq.append(random.randint(0, _maxint))
2973 costs.append(random.choice(costrange))
3107 costs.append(random.choice(costrange))
2974
3108
2975 def doinserts():
3109 def doinserts():
2976 d = util.lrucachedict(size)
3110 d = util.lrucachedict(size)
2977 for v in setseq:
3111 for v in setseq:
2978 d.insert(v, v)
3112 d.insert(v, v)
2979
3113
2980 def doinsertscost():
3114 def doinsertscost():
2981 d = util.lrucachedict(size, maxcost=costlimit)
3115 d = util.lrucachedict(size, maxcost=costlimit)
2982 for i, v in enumerate(setseq):
3116 for i, v in enumerate(setseq):
2983 d.insert(v, v, cost=costs[i])
3117 d.insert(v, v, cost=costs[i])
2984
3118
2985 def dosets():
3119 def dosets():
2986 d = util.lrucachedict(size)
3120 d = util.lrucachedict(size)
2987 for v in setseq:
3121 for v in setseq:
2988 d[v] = v
3122 d[v] = v
2989
3123
2990 # Mixed mode randomly performs gets and sets with eviction.
3124 # Mixed mode randomly performs gets and sets with eviction.
2991 mixedops = []
3125 mixedops = []
2992 for i in _xrange(mixed):
3126 for i in _xrange(mixed):
2993 r = random.randint(0, 100)
3127 r = random.randint(0, 100)
2994 if r < mixedgetfreq:
3128 if r < mixedgetfreq:
2995 op = 0
3129 op = 0
2996 else:
3130 else:
2997 op = 1
3131 op = 1
2998
3132
2999 mixedops.append((op,
3133 mixedops.append((op,
3000 random.randint(0, size * 2),
3134 random.randint(0, size * 2),
3001 random.choice(costrange)))
3135 random.choice(costrange)))
3002
3136
3003 def domixed():
3137 def domixed():
3004 d = util.lrucachedict(size)
3138 d = util.lrucachedict(size)
3005
3139
3006 for op, v, cost in mixedops:
3140 for op, v, cost in mixedops:
3007 if op == 0:
3141 if op == 0:
3008 try:
3142 try:
3009 d[v]
3143 d[v]
3010 except KeyError:
3144 except KeyError:
3011 pass
3145 pass
3012 else:
3146 else:
3013 d[v] = v
3147 d[v] = v
3014
3148
3015 def domixedcost():
3149 def domixedcost():
3016 d = util.lrucachedict(size, maxcost=costlimit)
3150 d = util.lrucachedict(size, maxcost=costlimit)
3017
3151
3018 for op, v, cost in mixedops:
3152 for op, v, cost in mixedops:
3019 if op == 0:
3153 if op == 0:
3020 try:
3154 try:
3021 d[v]
3155 d[v]
3022 except KeyError:
3156 except KeyError:
3023 pass
3157 pass
3024 else:
3158 else:
3025 d.insert(v, v, cost=cost)
3159 d.insert(v, v, cost=cost)
3026
3160
3027 benches = [
3161 benches = [
3028 (doinit, b'init'),
3162 (doinit, b'init'),
3029 ]
3163 ]
3030
3164
3031 if costlimit:
3165 if costlimit:
3032 benches.extend([
3166 benches.extend([
3033 (dogetscost, b'gets w/ cost limit'),
3167 (dogetscost, b'gets w/ cost limit'),
3034 (doinsertscost, b'inserts w/ cost limit'),
3168 (doinsertscost, b'inserts w/ cost limit'),
3035 (domixedcost, b'mixed w/ cost limit'),
3169 (domixedcost, b'mixed w/ cost limit'),
3036 ])
3170 ])
3037 else:
3171 else:
3038 benches.extend([
3172 benches.extend([
3039 (dogets, b'gets'),
3173 (dogets, b'gets'),
3040 (doinserts, b'inserts'),
3174 (doinserts, b'inserts'),
3041 (dosets, b'sets'),
3175 (dosets, b'sets'),
3042 (domixed, b'mixed')
3176 (domixed, b'mixed')
3043 ])
3177 ])
3044
3178
3045 for fn, title in benches:
3179 for fn, title in benches:
3046 timer, fm = gettimer(ui, opts)
3180 timer, fm = gettimer(ui, opts)
3047 timer(fn, title=title)
3181 timer(fn, title=title)
3048 fm.end()
3182 fm.end()
3049
3183
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def write():
        # Emit a fixed number of identical lines; the per-call cost of
        # ui.write() is what is being measured.
        for _unused in range(100000):
            ui.write(b'Testing write performance\n')

    timer(write)
    fm.end()
3062
3196
def uisetup(ui):
    """Extension setup hook: install a compatibility shim when needed."""
    # For "historical portability": when cmdutil.openrevlog() exists but
    # commands.debugrevlogopts does not, Mercurial is 1.9 (a79fea6b3e77)
    # through 3.7 (5606f7d0d063).  Per the original note, '--dir' support
    # (3.5 / 49c583ca48c4, via repo.dirlog) may be missing there, so wrap
    # openrevlog() to fail loudly rather than silently ignore the flag.
    if not util.safehasattr(cmdutil, b'openrevlog'):
        return
    if util.safehasattr(commands, b'debugrevlogopts'):
        return

    def openrevlog(orig, repo, cmd, file_, opts):
        if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
            raise error.Abort(b"This version doesn't support --dir option",
                              hint=b"use 3.5 or later")
        return orig(repo, cmd, file_, opts)

    extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3077
3211
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        # One increment per step up to ``total``; the context manager
        # takes care of creating and tearing down the progress bar.
        with ui.makeprogress(topic, total=total) as progress:
            for _unused in _xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now