perf: allow to specify the base of the merge in perfmergecalculate
Author: marmoute
Changeset: r42574:f0bcbbb6 (default branch)
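This change adds a ``--base`` option to ``perfmergecalculate``: when it is set, the merge ancestor is taken from that revision (``repo[scmutil.revsingle(repo, opts['base'])]``) instead of being computed with ``wctx.ancestor(rctx)``. A typical invocation, with placeholder revisions, would be ``hg perfmergecalculate --from REV1 --rev REV2 --base REV3``.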
@@ -1,2912 +1,2924 @@
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median, and average. If not set, only the best timing is reported
12 worst, median, and average. If not set, only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of seconds to wait before any group of runs (default: 1)
16 number of seconds to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of runs to perform before starting measurement.
19 number of runs to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (Only the first iteration is profiled)
23 (Only the first iteration is profiled)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If the benchmark has been running for <time> seconds, and we have performed
30 If the benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark.
31 <numberofrun> iterations, stop the benchmark.
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
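All of the options above live in the ``[perf]`` section and are read through the compatibility helpers defined later in this file. A minimal sketch of the equivalent reads on a modern ``ui`` object (the function name is made up; perf.py itself goes through its ``getint()`` wrapper plus ``ui.configbool``/``ui.configlist`` for portability):

    def read_perf_config(ui):
        # illustrative only: mirrors what gettimer()/getlen() read below
        return {
            b'presleep': ui.configint(b'perf', b'presleep', 1),
            b'stub': ui.configbool(b'perf', b'stub', False),
            b'all-timing': ui.configbool(b'perf', b'all-timing', False),
            b'pre-run': ui.configint(b'perf', b'pre-run', 0),
            b'profile-benchmark': ui.configbool(b'perf', b'profile-benchmark', False),
            b'run-limits': ui.configlist(b'perf', b'run-limits', []),
        }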
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 from __future__ import absolute_import
57 from __future__ import absolute_import
58 import contextlib
58 import contextlib
59 import functools
59 import functools
60 import gc
60 import gc
61 import os
61 import os
62 import random
62 import random
63 import shutil
63 import shutil
64 import struct
64 import struct
65 import sys
65 import sys
66 import tempfile
66 import tempfile
67 import threading
67 import threading
68 import time
68 import time
69 from mercurial import (
69 from mercurial import (
70 changegroup,
70 changegroup,
71 cmdutil,
71 cmdutil,
72 commands,
72 commands,
73 copies,
73 copies,
74 error,
74 error,
75 extensions,
75 extensions,
76 hg,
76 hg,
77 mdiff,
77 mdiff,
78 merge,
78 merge,
79 revlog,
79 revlog,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 dir(registrar) # forcibly load it
96 dir(registrar) # forcibly load it
97 except ImportError:
97 except ImportError:
98 registrar = None
98 registrar = None
99 try:
99 try:
100 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
100 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 except ImportError:
101 except ImportError:
102 pass
102 pass
103 try:
103 try:
104 from mercurial.utils import repoviewutil # since 5.0
104 from mercurial.utils import repoviewutil # since 5.0
105 except ImportError:
105 except ImportError:
106 repoviewutil = None
106 repoviewutil = None
107 try:
107 try:
108 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
108 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 except ImportError:
109 except ImportError:
110 pass
110 pass
111 try:
111 try:
112 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
112 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 except ImportError:
113 except ImportError:
114 pass
114 pass
115
115
116 try:
116 try:
117 from mercurial import profiling
117 from mercurial import profiling
118 except ImportError:
118 except ImportError:
119 profiling = None
119 profiling = None
120
120
121 def identity(a):
121 def identity(a):
122 return a
122 return a
123
123
124 try:
124 try:
125 from mercurial import pycompat
125 from mercurial import pycompat
126 getargspec = pycompat.getargspec # added to module after 4.5
126 getargspec = pycompat.getargspec # added to module after 4.5
127 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
127 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
128 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
128 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
129 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
129 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
130 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
130 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
131 if pycompat.ispy3:
131 if pycompat.ispy3:
132 _maxint = sys.maxsize # per py3 docs for replacing maxint
132 _maxint = sys.maxsize # per py3 docs for replacing maxint
133 else:
133 else:
134 _maxint = sys.maxint
134 _maxint = sys.maxint
135 except (ImportError, AttributeError):
135 except (ImportError, AttributeError):
136 import inspect
136 import inspect
137 getargspec = inspect.getargspec
137 getargspec = inspect.getargspec
138 _byteskwargs = identity
138 _byteskwargs = identity
139 fsencode = identity # no py3 support
139 fsencode = identity # no py3 support
140 _maxint = sys.maxint # no py3 support
140 _maxint = sys.maxint # no py3 support
141 _sysstr = lambda x: x # no py3 support
141 _sysstr = lambda x: x # no py3 support
142 _xrange = xrange
142 _xrange = xrange
143
143
144 try:
144 try:
145 # 4.7+
145 # 4.7+
146 queue = pycompat.queue.Queue
146 queue = pycompat.queue.Queue
147 except (AttributeError, ImportError):
147 except (AttributeError, ImportError):
148 # <4.7.
148 # <4.7.
149 try:
149 try:
150 queue = pycompat.queue
150 queue = pycompat.queue
151 except (AttributeError, ImportError):
151 except (AttributeError, ImportError):
152 queue = util.queue
152 queue = util.queue
153
153
154 try:
154 try:
155 from mercurial import logcmdutil
155 from mercurial import logcmdutil
156 makelogtemplater = logcmdutil.maketemplater
156 makelogtemplater = logcmdutil.maketemplater
157 except (AttributeError, ImportError):
157 except (AttributeError, ImportError):
158 try:
158 try:
159 makelogtemplater = cmdutil.makelogtemplater
159 makelogtemplater = cmdutil.makelogtemplater
160 except (AttributeError, ImportError):
160 except (AttributeError, ImportError):
161 makelogtemplater = None
161 makelogtemplater = None
162
162
163 # for "historical portability":
163 # for "historical portability":
164 # define util.safehasattr forcibly, because util.safehasattr has been
164 # define util.safehasattr forcibly, because util.safehasattr has been
165 # available since 1.9.3 (or 94b200a11cf7)
165 # available since 1.9.3 (or 94b200a11cf7)
166 _undefined = object()
166 _undefined = object()
167 def safehasattr(thing, attr):
167 def safehasattr(thing, attr):
168 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
168 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
169 setattr(util, 'safehasattr', safehasattr)
169 setattr(util, 'safehasattr', safehasattr)
170
170
171 # for "historical portability":
171 # for "historical portability":
172 # define util.timer forcibly, because util.timer has been available
172 # define util.timer forcibly, because util.timer has been available
173 # since ae5d60bb70c9
173 # since ae5d60bb70c9
174 if safehasattr(time, 'perf_counter'):
174 if safehasattr(time, 'perf_counter'):
175 util.timer = time.perf_counter
175 util.timer = time.perf_counter
176 elif os.name == b'nt':
176 elif os.name == b'nt':
177 util.timer = time.clock
177 util.timer = time.clock
178 else:
178 else:
179 util.timer = time.time
179 util.timer = time.time
180
180
181 # for "historical portability":
181 # for "historical portability":
182 # use locally defined empty option list, if formatteropts isn't
182 # use locally defined empty option list, if formatteropts isn't
183 # available, because commands.formatteropts has been available since
183 # available, because commands.formatteropts has been available since
184 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
184 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
185 # available since 2.2 (or ae5f92e154d3)
185 # available since 2.2 (or ae5f92e154d3)
186 formatteropts = getattr(cmdutil, "formatteropts",
186 formatteropts = getattr(cmdutil, "formatteropts",
187 getattr(commands, "formatteropts", []))
187 getattr(commands, "formatteropts", []))
188
188
189 # for "historical portability":
189 # for "historical portability":
190 # use locally defined option list, if debugrevlogopts isn't available,
190 # use locally defined option list, if debugrevlogopts isn't available,
191 # because commands.debugrevlogopts has been available since 3.7 (or
191 # because commands.debugrevlogopts has been available since 3.7 (or
192 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
192 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
193 # since 1.9 (or a79fea6b3e77).
193 # since 1.9 (or a79fea6b3e77).
194 revlogopts = getattr(cmdutil, "debugrevlogopts",
194 revlogopts = getattr(cmdutil, "debugrevlogopts",
195 getattr(commands, "debugrevlogopts", [
195 getattr(commands, "debugrevlogopts", [
196 (b'c', b'changelog', False, (b'open changelog')),
196 (b'c', b'changelog', False, (b'open changelog')),
197 (b'm', b'manifest', False, (b'open manifest')),
197 (b'm', b'manifest', False, (b'open manifest')),
198 (b'', b'dir', False, (b'open directory manifest')),
198 (b'', b'dir', False, (b'open directory manifest')),
199 ]))
199 ]))
200
200
201 cmdtable = {}
201 cmdtable = {}
202
202
203 # for "historical portability":
203 # for "historical portability":
204 # define parsealiases locally, because cmdutil.parsealiases has been
204 # define parsealiases locally, because cmdutil.parsealiases has been
205 # available since 1.5 (or 6252852b4332)
205 # available since 1.5 (or 6252852b4332)
206 def parsealiases(cmd):
206 def parsealiases(cmd):
207 return cmd.split(b"|")
207 return cmd.split(b"|")
208
208
209 if safehasattr(registrar, 'command'):
209 if safehasattr(registrar, 'command'):
210 command = registrar.command(cmdtable)
210 command = registrar.command(cmdtable)
211 elif safehasattr(cmdutil, 'command'):
211 elif safehasattr(cmdutil, 'command'):
212 command = cmdutil.command(cmdtable)
212 command = cmdutil.command(cmdtable)
213 if b'norepo' not in getargspec(command).args:
213 if b'norepo' not in getargspec(command).args:
214 # for "historical portability":
214 # for "historical portability":
215 # wrap original cmdutil.command, because "norepo" option has
215 # wrap original cmdutil.command, because "norepo" option has
216 # been available since 3.1 (or 75a96326cecb)
216 # been available since 3.1 (or 75a96326cecb)
217 _command = command
217 _command = command
218 def command(name, options=(), synopsis=None, norepo=False):
218 def command(name, options=(), synopsis=None, norepo=False):
219 if norepo:
219 if norepo:
220 commands.norepo += b' %s' % b' '.join(parsealiases(name))
220 commands.norepo += b' %s' % b' '.join(parsealiases(name))
221 return _command(name, list(options), synopsis)
221 return _command(name, list(options), synopsis)
222 else:
222 else:
223 # for "historical portability":
223 # for "historical portability":
224 # define "@command" annotation locally, because cmdutil.command
224 # define "@command" annotation locally, because cmdutil.command
225 # has been available since 1.9 (or 2daa5179e73f)
225 # has been available since 1.9 (or 2daa5179e73f)
226 def command(name, options=(), synopsis=None, norepo=False):
226 def command(name, options=(), synopsis=None, norepo=False):
227 def decorator(func):
227 def decorator(func):
228 if synopsis:
228 if synopsis:
229 cmdtable[name] = func, list(options), synopsis
229 cmdtable[name] = func, list(options), synopsis
230 else:
230 else:
231 cmdtable[name] = func, list(options)
231 cmdtable[name] = func, list(options)
232 if norepo:
232 if norepo:
233 commands.norepo += b' %s' % b' '.join(parsealiases(name))
233 commands.norepo += b' %s' % b' '.join(parsealiases(name))
234 return func
234 return func
235 return decorator
235 return decorator
236
236
237 try:
237 try:
238 import mercurial.registrar
238 import mercurial.registrar
239 import mercurial.configitems
239 import mercurial.configitems
240 configtable = {}
240 configtable = {}
241 configitem = mercurial.registrar.configitem(configtable)
241 configitem = mercurial.registrar.configitem(configtable)
242 configitem(b'perf', b'presleep',
242 configitem(b'perf', b'presleep',
243 default=mercurial.configitems.dynamicdefault,
243 default=mercurial.configitems.dynamicdefault,
244 )
244 )
245 configitem(b'perf', b'stub',
245 configitem(b'perf', b'stub',
246 default=mercurial.configitems.dynamicdefault,
246 default=mercurial.configitems.dynamicdefault,
247 )
247 )
248 configitem(b'perf', b'parentscount',
248 configitem(b'perf', b'parentscount',
249 default=mercurial.configitems.dynamicdefault,
249 default=mercurial.configitems.dynamicdefault,
250 )
250 )
251 configitem(b'perf', b'all-timing',
251 configitem(b'perf', b'all-timing',
252 default=mercurial.configitems.dynamicdefault,
252 default=mercurial.configitems.dynamicdefault,
253 )
253 )
254 configitem(b'perf', b'pre-run',
254 configitem(b'perf', b'pre-run',
255 default=mercurial.configitems.dynamicdefault,
255 default=mercurial.configitems.dynamicdefault,
256 )
256 )
257 configitem(b'perf', b'profile-benchmark',
257 configitem(b'perf', b'profile-benchmark',
258 default=mercurial.configitems.dynamicdefault,
258 default=mercurial.configitems.dynamicdefault,
259 )
259 )
260 configitem(b'perf', b'run-limits',
260 configitem(b'perf', b'run-limits',
261 default=mercurial.configitems.dynamicdefault,
261 default=mercurial.configitems.dynamicdefault,
262 )
262 )
263 except (ImportError, AttributeError):
263 except (ImportError, AttributeError):
264 pass
264 pass
265
265
266 def getlen(ui):
266 def getlen(ui):
267 if ui.configbool(b"perf", b"stub", False):
267 if ui.configbool(b"perf", b"stub", False):
268 return lambda x: 1
268 return lambda x: 1
269 return len
269 return len
270
270
271 class noop(object):
271 class noop(object):
272 """dummy context manager"""
272 """dummy context manager"""
273 def __enter__(self):
273 def __enter__(self):
274 pass
274 pass
275 def __exit__(self, *args):
275 def __exit__(self, *args):
276 pass
276 pass
277
277
278 NOOPCTX = noop()
278 NOOPCTX = noop()
279
279
280 def gettimer(ui, opts=None):
280 def gettimer(ui, opts=None):
281 """return a timer function and formatter: (timer, formatter)
281 """return a timer function and formatter: (timer, formatter)
282
282
283 This function exists to gather the creation of formatter in a single
283 This function exists to gather the creation of formatter in a single
284 place instead of duplicating it in all performance commands."""
284 place instead of duplicating it in all performance commands."""
285
285
286 # enforce an idle period before execution to counteract power management
286 # enforce an idle period before execution to counteract power management
287 # experimental config: perf.presleep
287 # experimental config: perf.presleep
288 time.sleep(getint(ui, b"perf", b"presleep", 1))
288 time.sleep(getint(ui, b"perf", b"presleep", 1))
289
289
290 if opts is None:
290 if opts is None:
291 opts = {}
291 opts = {}
292 # redirect all to stderr unless buffer api is in use
292 # redirect all to stderr unless buffer api is in use
293 if not ui._buffers:
293 if not ui._buffers:
294 ui = ui.copy()
294 ui = ui.copy()
295 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
295 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
296 if uifout:
296 if uifout:
297 # for "historical portability":
297 # for "historical portability":
298 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
298 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
299 uifout.set(ui.ferr)
299 uifout.set(ui.ferr)
300
300
301 # get a formatter
301 # get a formatter
302 uiformatter = getattr(ui, 'formatter', None)
302 uiformatter = getattr(ui, 'formatter', None)
303 if uiformatter:
303 if uiformatter:
304 fm = uiformatter(b'perf', opts)
304 fm = uiformatter(b'perf', opts)
305 else:
305 else:
306 # for "historical portability":
306 # for "historical portability":
307 # define formatter locally, because ui.formatter has been
307 # define formatter locally, because ui.formatter has been
308 # available since 2.2 (or ae5f92e154d3)
308 # available since 2.2 (or ae5f92e154d3)
309 from mercurial import node
309 from mercurial import node
310 class defaultformatter(object):
310 class defaultformatter(object):
311 """Minimized composition of baseformatter and plainformatter
311 """Minimized composition of baseformatter and plainformatter
312 """
312 """
313 def __init__(self, ui, topic, opts):
313 def __init__(self, ui, topic, opts):
314 self._ui = ui
314 self._ui = ui
315 if ui.debugflag:
315 if ui.debugflag:
316 self.hexfunc = node.hex
316 self.hexfunc = node.hex
317 else:
317 else:
318 self.hexfunc = node.short
318 self.hexfunc = node.short
319 def __nonzero__(self):
319 def __nonzero__(self):
320 return False
320 return False
321 __bool__ = __nonzero__
321 __bool__ = __nonzero__
322 def startitem(self):
322 def startitem(self):
323 pass
323 pass
324 def data(self, **data):
324 def data(self, **data):
325 pass
325 pass
326 def write(self, fields, deftext, *fielddata, **opts):
326 def write(self, fields, deftext, *fielddata, **opts):
327 self._ui.write(deftext % fielddata, **opts)
327 self._ui.write(deftext % fielddata, **opts)
328 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
328 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
329 if cond:
329 if cond:
330 self._ui.write(deftext % fielddata, **opts)
330 self._ui.write(deftext % fielddata, **opts)
331 def plain(self, text, **opts):
331 def plain(self, text, **opts):
332 self._ui.write(text, **opts)
332 self._ui.write(text, **opts)
333 def end(self):
333 def end(self):
334 pass
334 pass
335 fm = defaultformatter(ui, b'perf', opts)
335 fm = defaultformatter(ui, b'perf', opts)
336
336
337 # stub function, runs code only once instead of in a loop
337 # stub function, runs code only once instead of in a loop
338 # experimental config: perf.stub
338 # experimental config: perf.stub
339 if ui.configbool(b"perf", b"stub", False):
339 if ui.configbool(b"perf", b"stub", False):
340 return functools.partial(stub_timer, fm), fm
340 return functools.partial(stub_timer, fm), fm
341
341
342 # experimental config: perf.all-timing
342 # experimental config: perf.all-timing
343 displayall = ui.configbool(b"perf", b"all-timing", False)
343 displayall = ui.configbool(b"perf", b"all-timing", False)
344
344
345 # experimental config: perf.run-limits
345 # experimental config: perf.run-limits
346 limitspec = ui.configlist(b"perf", b"run-limits", [])
346 limitspec = ui.configlist(b"perf", b"run-limits", [])
347 limits = []
347 limits = []
348 for item in limitspec:
348 for item in limitspec:
349 parts = item.split(b'-', 1)
349 parts = item.split(b'-', 1)
350 if len(parts) < 2:
350 if len(parts) < 2:
351 ui.warn((b'malformatted run limit entry, missing "-": %s\n'
351 ui.warn((b'malformatted run limit entry, missing "-": %s\n'
352 % item))
352 % item))
353 continue
353 continue
354 try:
354 try:
355 time_limit = float(pycompat.sysstr(parts[0]))
355 time_limit = float(pycompat.sysstr(parts[0]))
356 except ValueError as e:
356 except ValueError as e:
357 ui.warn((b'malformatted run limit entry, %s: %s\n'
357 ui.warn((b'malformatted run limit entry, %s: %s\n'
358 % (pycompat.bytestr(e), item)))
358 % (pycompat.bytestr(e), item)))
359 continue
359 continue
360 try:
360 try:
361 run_limit = int(pycompat.sysstr(parts[1]))
361 run_limit = int(pycompat.sysstr(parts[1]))
362 except ValueError as e:
362 except ValueError as e:
363 ui.warn((b'malformatted run limit entry, %s: %s\n'
363 ui.warn((b'malformatted run limit entry, %s: %s\n'
364 % (pycompat.bytestr(e), item)))
364 % (pycompat.bytestr(e), item)))
365 continue
365 continue
366 limits.append((time_limit, run_limit))
366 limits.append((time_limit, run_limit))
367 if not limits:
367 if not limits:
368 limits = DEFAULTLIMITS
368 limits = DEFAULTLIMITS
369
369
370 profiler = None
370 profiler = None
371 if profiling is not None:
371 if profiling is not None:
372 if ui.configbool(b"perf", b"profile-benchmark", False):
372 if ui.configbool(b"perf", b"profile-benchmark", False):
373 profiler = profiling.profile(ui)
373 profiler = profiling.profile(ui)
374
374
375 prerun = getint(ui, b"perf", b"pre-run", 0)
375 prerun = getint(ui, b"perf", b"pre-run", 0)
376 t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
376 t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
377 prerun=prerun, profiler=profiler)
377 prerun=prerun, profiler=profiler)
378 return t, fm
378 return t, fm
379
379
380 def stub_timer(fm, func, setup=None, title=None):
380 def stub_timer(fm, func, setup=None, title=None):
381 if setup is not None:
381 if setup is not None:
382 setup()
382 setup()
383 func()
383 func()
384
384
385 @contextlib.contextmanager
385 @contextlib.contextmanager
386 def timeone():
386 def timeone():
387 r = []
387 r = []
388 ostart = os.times()
388 ostart = os.times()
389 cstart = util.timer()
389 cstart = util.timer()
390 yield r
390 yield r
391 cstop = util.timer()
391 cstop = util.timer()
392 ostop = os.times()
392 ostop = os.times()
393 a, b = ostart, ostop
393 a, b = ostart, ostop
394 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
394 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
395
395
396
396
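A self-contained sketch of the sampling pattern ``timeone()`` implements: one ``(wall, user-cpu, sys-cpu)`` tuple per run, combining ``util.timer()`` (here ``time.perf_counter``) with the deltas from ``os.times()``. The names below are illustrative, not perf.py API:

    import contextlib
    import os
    import time

    @contextlib.contextmanager
    def sample():
        result = []
        os_start = os.times()
        wall_start = time.perf_counter()
        yield result
        wall_stop = time.perf_counter()
        os_stop = os.times()
        # (wall clock, user CPU delta, system CPU delta), like timeone()
        result.append((wall_stop - wall_start,
                       os_stop[0] - os_start[0],
                       os_stop[1] - os_start[1]))

    with sample() as r:
        sum(i * i for i in range(100000))  # code under measurement
    wall, user_cpu, sys_cpu = r[0]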
397 # list of stop condition (elapsed time, minimal run count)
397 # list of stop condition (elapsed time, minimal run count)
398 DEFAULTLIMITS = (
398 DEFAULTLIMITS = (
399 (3.0, 100),
399 (3.0, 100),
400 (10.0, 3),
400 (10.0, 3),
401 )
401 )
402
402
403 def _timer(fm, func, setup=None, title=None, displayall=False,
403 def _timer(fm, func, setup=None, title=None, displayall=False,
404 limits=DEFAULTLIMITS, prerun=0, profiler=None):
404 limits=DEFAULTLIMITS, prerun=0, profiler=None):
405 gc.collect()
405 gc.collect()
406 results = []
406 results = []
407 begin = util.timer()
407 begin = util.timer()
408 count = 0
408 count = 0
409 if profiler is None:
409 if profiler is None:
410 profiler = NOOPCTX
410 profiler = NOOPCTX
411 for i in range(prerun):
411 for i in range(prerun):
412 if setup is not None:
412 if setup is not None:
413 setup()
413 setup()
414 func()
414 func()
415 keepgoing = True
415 keepgoing = True
416 while keepgoing:
416 while keepgoing:
417 if setup is not None:
417 if setup is not None:
418 setup()
418 setup()
419 with profiler:
419 with profiler:
420 with timeone() as item:
420 with timeone() as item:
421 r = func()
421 r = func()
422 profiler = NOOPCTX
422 profiler = NOOPCTX
423 count += 1
423 count += 1
424 results.append(item[0])
424 results.append(item[0])
425 cstop = util.timer()
425 cstop = util.timer()
426 # Look for a stop condition.
426 # Look for a stop condition.
427 elapsed = cstop - begin
427 elapsed = cstop - begin
428 for t, mincount in limits:
428 for t, mincount in limits:
429 if elapsed >= t and count >= mincount:
429 if elapsed >= t and count >= mincount:
430 keepgoing = False
430 keepgoing = False
431 break
431 break
432
432
433 formatone(fm, results, title=title, result=r,
433 formatone(fm, results, title=title, result=r,
434 displayall=displayall)
434 displayall=displayall)
435
435
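Putting the pieces together, a standalone sketch of how a ``run-limits`` spec drives the loop above: each item is parsed the same way ``gettimer()`` parses it, and the first satisfied ``(elapsed, count)`` pair ends the benchmark. ``run_once`` is a stand-in for the measured callable:

    import time

    def parse_run_limits(spec):
        # "3.0-100, 10.0-3" -> [(3.0, 100), (10.0, 3)]
        limits = []
        for item in spec.replace(',', ' ').split():
            duration, count = item.split('-', 1)
            limits.append((float(duration), int(count)))
        return limits

    def run_with_limits(run_once, limits):
        begin = time.perf_counter()
        count = 0
        keepgoing = True
        while keepgoing:
            run_once()
            count += 1
            elapsed = time.perf_counter() - begin
            for max_time, min_count in limits:
                if elapsed >= max_time and count >= min_count:
                    keepgoing = False
                    break
        return count

    run_with_limits(lambda: sum(range(10000)),
                    parse_run_limits("3.0-100, 10.0-3"))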
436 def formatone(fm, timings, title=None, result=None, displayall=False):
436 def formatone(fm, timings, title=None, result=None, displayall=False):
437
437
438 count = len(timings)
438 count = len(timings)
439
439
440 fm.startitem()
440 fm.startitem()
441
441
442 if title:
442 if title:
443 fm.write(b'title', b'! %s\n', title)
443 fm.write(b'title', b'! %s\n', title)
444 if result:
444 if result:
445 fm.write(b'result', b'! result: %s\n', result)
445 fm.write(b'result', b'! result: %s\n', result)
446 def display(role, entry):
446 def display(role, entry):
447 prefix = b''
447 prefix = b''
448 if role != b'best':
448 if role != b'best':
449 prefix = b'%s.' % role
449 prefix = b'%s.' % role
450 fm.plain(b'!')
450 fm.plain(b'!')
451 fm.write(prefix + b'wall', b' wall %f', entry[0])
451 fm.write(prefix + b'wall', b' wall %f', entry[0])
452 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
452 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
453 fm.write(prefix + b'user', b' user %f', entry[1])
453 fm.write(prefix + b'user', b' user %f', entry[1])
454 fm.write(prefix + b'sys', b' sys %f', entry[2])
454 fm.write(prefix + b'sys', b' sys %f', entry[2])
455 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
455 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
456 fm.plain(b'\n')
456 fm.plain(b'\n')
457 timings.sort()
457 timings.sort()
458 min_val = timings[0]
458 min_val = timings[0]
459 display(b'best', min_val)
459 display(b'best', min_val)
460 if displayall:
460 if displayall:
461 max_val = timings[-1]
461 max_val = timings[-1]
462 display(b'max', max_val)
462 display(b'max', max_val)
463 avg = tuple([sum(x) / count for x in zip(*timings)])
463 avg = tuple([sum(x) / count for x in zip(*timings)])
464 display(b'avg', avg)
464 display(b'avg', avg)
465 median = timings[len(timings) // 2]
465 median = timings[len(timings) // 2]
466 display(b'median', median)
466 display(b'median', median)
467
467
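For reference, a standalone sketch of the statistics ``formatone()`` reports when ``all-timing`` is enabled, computed over a list of ``(wall, user, sys)`` samples (the numbers are made up):

    def summarize(timings):
        timings = sorted(timings)              # ordered by wall time first
        count = len(timings)
        avg = tuple(sum(vals) / count for vals in zip(*timings))
        return {'best': timings[0],
                'max': timings[-1],
                'avg': avg,
                'median': timings[count // 2]}

    print(summarize([(0.12, 0.10, 0.01), (0.15, 0.11, 0.02), (0.11, 0.09, 0.01)]))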
468 # utilities for historical portability
468 # utilities for historical portability
469
469
470 def getint(ui, section, name, default):
470 def getint(ui, section, name, default):
471 # for "historical portability":
471 # for "historical portability":
472 # ui.configint has been available since 1.9 (or fa2b596db182)
472 # ui.configint has been available since 1.9 (or fa2b596db182)
473 v = ui.config(section, name, None)
473 v = ui.config(section, name, None)
474 if v is None:
474 if v is None:
475 return default
475 return default
476 try:
476 try:
477 return int(v)
477 return int(v)
478 except ValueError:
478 except ValueError:
479 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
479 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
480 % (section, name, v))
480 % (section, name, v))
481
481
482 def safeattrsetter(obj, name, ignoremissing=False):
482 def safeattrsetter(obj, name, ignoremissing=False):
483 """Ensure that 'obj' has 'name' attribute before subsequent setattr
483 """Ensure that 'obj' has 'name' attribute before subsequent setattr
484
484
485 This function is aborted, if 'obj' doesn't have 'name' attribute
485 This function is aborted, if 'obj' doesn't have 'name' attribute
486 at runtime. This avoids overlooking removal of an attribute, which
486 at runtime. This avoids overlooking removal of an attribute, which
487 breaks assumption of performance measurement, in the future.
487 breaks assumption of performance measurement, in the future.
488
488
489 This function returns the object to (1) assign a new value, and
489 This function returns the object to (1) assign a new value, and
490 (2) restore an original value to the attribute.
490 (2) restore an original value to the attribute.
491
491
492 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
492 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
493 abortion, and this function returns None. This is useful to
493 abortion, and this function returns None. This is useful to
494 examine an attribute, which isn't ensured in all Mercurial
494 examine an attribute, which isn't ensured in all Mercurial
495 versions.
495 versions.
496 """
496 """
497 if not util.safehasattr(obj, name):
497 if not util.safehasattr(obj, name):
498 if ignoremissing:
498 if ignoremissing:
499 return None
499 return None
500 raise error.Abort((b"missing attribute %s of %s might break assumption"
500 raise error.Abort((b"missing attribute %s of %s might break assumption"
501 b" of performance measurement") % (name, obj))
501 b" of performance measurement") % (name, obj))
502
502
503 origvalue = getattr(obj, _sysstr(name))
503 origvalue = getattr(obj, _sysstr(name))
504 class attrutil(object):
504 class attrutil(object):
505 def set(self, newvalue):
505 def set(self, newvalue):
506 setattr(obj, _sysstr(name), newvalue)
506 setattr(obj, _sysstr(name), newvalue)
507 def restore(self):
507 def restore(self):
508 setattr(obj, _sysstr(name), origvalue)
508 setattr(obj, _sysstr(name), origvalue)
509
509
510 return attrutil()
510 return attrutil()
511
511
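``gettimer()`` uses the object returned here to point ``ui.fout`` at ``ui.ferr`` so benchmark output does not end up in captured stdout. A standalone sketch of that set/restore idea using plain ``sys`` streams rather than the perf.py API:

    import sys

    saved = sys.stdout
    try:
        sys.stdout = sys.stderr       # comparable to uifout.set(ui.ferr)
        print('benchmark chatter goes to stderr')
    finally:
        sys.stdout = saved            # comparable to attrutil.restore()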
512 # utilities to examine each internal API changes
512 # utilities to examine each internal API changes
513
513
514 def getbranchmapsubsettable():
514 def getbranchmapsubsettable():
515 # for "historical portability":
515 # for "historical portability":
516 # subsettable is defined in:
516 # subsettable is defined in:
517 # - branchmap since 2.9 (or 175c6fd8cacc)
517 # - branchmap since 2.9 (or 175c6fd8cacc)
518 # - repoview since 2.5 (or 59a9f18d4587)
518 # - repoview since 2.5 (or 59a9f18d4587)
519 # - repoviewutil since 5.0
519 # - repoviewutil since 5.0
520 for mod in (branchmap, repoview, repoviewutil):
520 for mod in (branchmap, repoview, repoviewutil):
521 subsettable = getattr(mod, 'subsettable', None)
521 subsettable = getattr(mod, 'subsettable', None)
522 if subsettable:
522 if subsettable:
523 return subsettable
523 return subsettable
524
524
525 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
525 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
526 # branchmap and repoview modules exist, but subsettable attribute
526 # branchmap and repoview modules exist, but subsettable attribute
527 # doesn't)
527 # doesn't)
528 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
528 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
529 hint=b"use 2.5 or later")
529 hint=b"use 2.5 or later")
530
530
531 def getsvfs(repo):
531 def getsvfs(repo):
532 """Return appropriate object to access files under .hg/store
532 """Return appropriate object to access files under .hg/store
533 """
533 """
534 # for "historical portability":
534 # for "historical portability":
535 # repo.svfs has been available since 2.3 (or 7034365089bf)
535 # repo.svfs has been available since 2.3 (or 7034365089bf)
536 svfs = getattr(repo, 'svfs', None)
536 svfs = getattr(repo, 'svfs', None)
537 if svfs:
537 if svfs:
538 return svfs
538 return svfs
539 else:
539 else:
540 return getattr(repo, 'sopener')
540 return getattr(repo, 'sopener')
541
541
542 def getvfs(repo):
542 def getvfs(repo):
543 """Return appropriate object to access files under .hg
543 """Return appropriate object to access files under .hg
544 """
544 """
545 # for "historical portability":
545 # for "historical portability":
546 # repo.vfs has been available since 2.3 (or 7034365089bf)
546 # repo.vfs has been available since 2.3 (or 7034365089bf)
547 vfs = getattr(repo, 'vfs', None)
547 vfs = getattr(repo, 'vfs', None)
548 if vfs:
548 if vfs:
549 return vfs
549 return vfs
550 else:
550 else:
551 return getattr(repo, 'opener')
551 return getattr(repo, 'opener')
552
552
553 def repocleartagscachefunc(repo):
553 def repocleartagscachefunc(repo):
554 """Return the function to clear tags cache according to repo internal API
554 """Return the function to clear tags cache according to repo internal API
555 """
555 """
556 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
556 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
557 # in this case, setattr(repo, '_tagscache', None) or so isn't
557 # in this case, setattr(repo, '_tagscache', None) or so isn't
558 # correct way to clear tags cache, because existing code paths
558 # correct way to clear tags cache, because existing code paths
559 # expect _tagscache to be a structured object.
559 # expect _tagscache to be a structured object.
560 def clearcache():
560 def clearcache():
561 # _tagscache has been filteredpropertycache since 2.5 (or
561 # _tagscache has been filteredpropertycache since 2.5 (or
562 # 98c867ac1330), and delattr() can't work in such case
562 # 98c867ac1330), and delattr() can't work in such case
563 if b'_tagscache' in vars(repo):
563 if b'_tagscache' in vars(repo):
564 del repo.__dict__[b'_tagscache']
564 del repo.__dict__[b'_tagscache']
565 return clearcache
565 return clearcache
566
566
567 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
567 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
568 if repotags: # since 1.4 (or 5614a628d173)
568 if repotags: # since 1.4 (or 5614a628d173)
569 return lambda : repotags.set(None)
569 return lambda : repotags.set(None)
570
570
571 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
571 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
572 if repotagscache: # since 0.6 (or d7df759d0e97)
572 if repotagscache: # since 0.6 (or d7df759d0e97)
573 return lambda : repotagscache.set(None)
573 return lambda : repotagscache.set(None)
574
574
575 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
575 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
576 # this point, but it isn't so problematic, because:
576 # this point, but it isn't so problematic, because:
577 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
577 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
578 # in perftags() causes failure soon
578 # in perftags() causes failure soon
579 # - perf.py itself has been available since 1.1 (or eb240755386d)
579 # - perf.py itself has been available since 1.1 (or eb240755386d)
580 raise error.Abort((b"tags API of this hg command is unknown"))
580 raise error.Abort((b"tags API of this hg command is unknown"))
581
581
582 # utilities to clear cache
582 # utilities to clear cache
583
583
584 def clearfilecache(obj, attrname):
584 def clearfilecache(obj, attrname):
585 unfiltered = getattr(obj, 'unfiltered', None)
585 unfiltered = getattr(obj, 'unfiltered', None)
586 if unfiltered is not None:
586 if unfiltered is not None:
587 obj = obj.unfiltered()
587 obj = obj.unfiltered()
588 if attrname in vars(obj):
588 if attrname in vars(obj):
589 delattr(obj, attrname)
589 delattr(obj, attrname)
590 obj._filecache.pop(attrname, None)
590 obj._filecache.pop(attrname, None)
591
591
592 def clearchangelog(repo):
592 def clearchangelog(repo):
593 if repo is not repo.unfiltered():
593 if repo is not repo.unfiltered():
594 object.__setattr__(repo, r'_clcachekey', None)
594 object.__setattr__(repo, r'_clcachekey', None)
595 object.__setattr__(repo, r'_clcache', None)
595 object.__setattr__(repo, r'_clcache', None)
596 clearfilecache(repo.unfiltered(), 'changelog')
596 clearfilecache(repo.unfiltered(), 'changelog')
597
597
598 # perf commands
598 # perf commands
599
599
600 @command(b'perfwalk', formatteropts)
600 @command(b'perfwalk', formatteropts)
601 def perfwalk(ui, repo, *pats, **opts):
601 def perfwalk(ui, repo, *pats, **opts):
602 opts = _byteskwargs(opts)
602 opts = _byteskwargs(opts)
603 timer, fm = gettimer(ui, opts)
603 timer, fm = gettimer(ui, opts)
604 m = scmutil.match(repo[None], pats, {})
604 m = scmutil.match(repo[None], pats, {})
605 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
605 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
606 ignored=False))))
606 ignored=False))))
607 fm.end()
607 fm.end()
608
608
609 @command(b'perfannotate', formatteropts)
609 @command(b'perfannotate', formatteropts)
610 def perfannotate(ui, repo, f, **opts):
610 def perfannotate(ui, repo, f, **opts):
611 opts = _byteskwargs(opts)
611 opts = _byteskwargs(opts)
612 timer, fm = gettimer(ui, opts)
612 timer, fm = gettimer(ui, opts)
613 fc = repo[b'.'][f]
613 fc = repo[b'.'][f]
614 timer(lambda: len(fc.annotate(True)))
614 timer(lambda: len(fc.annotate(True)))
615 fm.end()
615 fm.end()
616
616
617 @command(b'perfstatus',
617 @command(b'perfstatus',
618 [(b'u', b'unknown', False,
618 [(b'u', b'unknown', False,
619 b'ask status to look for unknown files')] + formatteropts)
619 b'ask status to look for unknown files')] + formatteropts)
620 def perfstatus(ui, repo, **opts):
620 def perfstatus(ui, repo, **opts):
621 opts = _byteskwargs(opts)
621 opts = _byteskwargs(opts)
622 #m = match.always(repo.root, repo.getcwd())
622 #m = match.always(repo.root, repo.getcwd())
623 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
623 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
624 # False))))
624 # False))))
625 timer, fm = gettimer(ui, opts)
625 timer, fm = gettimer(ui, opts)
626 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
626 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
627 fm.end()
627 fm.end()
628
628
629 @command(b'perfaddremove', formatteropts)
629 @command(b'perfaddremove', formatteropts)
630 def perfaddremove(ui, repo, **opts):
630 def perfaddremove(ui, repo, **opts):
631 opts = _byteskwargs(opts)
631 opts = _byteskwargs(opts)
632 timer, fm = gettimer(ui, opts)
632 timer, fm = gettimer(ui, opts)
633 try:
633 try:
634 oldquiet = repo.ui.quiet
634 oldquiet = repo.ui.quiet
635 repo.ui.quiet = True
635 repo.ui.quiet = True
636 matcher = scmutil.match(repo[None])
636 matcher = scmutil.match(repo[None])
637 opts[b'dry_run'] = True
637 opts[b'dry_run'] = True
638 if b'uipathfn' in getargspec(scmutil.addremove).args:
638 if b'uipathfn' in getargspec(scmutil.addremove).args:
639 uipathfn = scmutil.getuipathfn(repo)
639 uipathfn = scmutil.getuipathfn(repo)
640 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
640 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
641 else:
641 else:
642 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
642 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
643 finally:
643 finally:
644 repo.ui.quiet = oldquiet
644 repo.ui.quiet = oldquiet
645 fm.end()
645 fm.end()
646
646
647 def clearcaches(cl):
647 def clearcaches(cl):
648 # behave somewhat consistently across internal API changes
648 # behave somewhat consistently across internal API changes
649 if util.safehasattr(cl, b'clearcaches'):
649 if util.safehasattr(cl, b'clearcaches'):
650 cl.clearcaches()
650 cl.clearcaches()
651 elif util.safehasattr(cl, b'_nodecache'):
651 elif util.safehasattr(cl, b'_nodecache'):
652 from mercurial.node import nullid, nullrev
652 from mercurial.node import nullid, nullrev
653 cl._nodecache = {nullid: nullrev}
653 cl._nodecache = {nullid: nullrev}
654 cl._nodepos = None
654 cl._nodepos = None
655
655
656 @command(b'perfheads', formatteropts)
656 @command(b'perfheads', formatteropts)
657 def perfheads(ui, repo, **opts):
657 def perfheads(ui, repo, **opts):
658 """benchmark the computation of a changelog heads"""
658 """benchmark the computation of a changelog heads"""
659 opts = _byteskwargs(opts)
659 opts = _byteskwargs(opts)
660 timer, fm = gettimer(ui, opts)
660 timer, fm = gettimer(ui, opts)
661 cl = repo.changelog
661 cl = repo.changelog
662 def s():
662 def s():
663 clearcaches(cl)
663 clearcaches(cl)
664 def d():
664 def d():
665 len(cl.headrevs())
665 len(cl.headrevs())
666 timer(d, setup=s)
666 timer(d, setup=s)
667 fm.end()
667 fm.end()
668
668
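The perf commands in this file all follow the same shape as ``perfheads`` above: an optional ``s()`` setup callable that clears caches, a ``d()`` closure doing the measured work, then ``timer(d, setup=s)`` followed by ``fm.end()``. A hedged sketch of a new benchmark in that style; ``perfexample`` is a made-up name, and the code assumes it sits in this file next to ``command``, ``formatteropts``, ``gettimer``, ``_byteskwargs`` and ``clearcaches``:

    @command(b'perfexample', formatteropts)
    def perfexample(ui, repo, **opts):
        """benchmark iterating over every changelog revision (illustrative only)"""
        opts = _byteskwargs(opts)
        timer, fm = gettimer(ui, opts)
        cl = repo.changelog
        def s():
            clearcaches(cl)              # start each run from cold caches
        def d():
            for rev in cl.revs():        # the work being measured
                pass
        timer(d, setup=s)
        fm.end()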
669 @command(b'perftags', formatteropts+
669 @command(b'perftags', formatteropts+
670 [
670 [
671 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
671 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
672 ])
672 ])
673 def perftags(ui, repo, **opts):
673 def perftags(ui, repo, **opts):
674 opts = _byteskwargs(opts)
674 opts = _byteskwargs(opts)
675 timer, fm = gettimer(ui, opts)
675 timer, fm = gettimer(ui, opts)
676 repocleartagscache = repocleartagscachefunc(repo)
676 repocleartagscache = repocleartagscachefunc(repo)
677 clearrevlogs = opts[b'clear_revlogs']
677 clearrevlogs = opts[b'clear_revlogs']
678 def s():
678 def s():
679 if clearrevlogs:
679 if clearrevlogs:
680 clearchangelog(repo)
680 clearchangelog(repo)
681 clearfilecache(repo.unfiltered(), 'manifest')
681 clearfilecache(repo.unfiltered(), 'manifest')
682 repocleartagscache()
682 repocleartagscache()
683 def t():
683 def t():
684 return len(repo.tags())
684 return len(repo.tags())
685 timer(t, setup=s)
685 timer(t, setup=s)
686 fm.end()
686 fm.end()
687
687
688 @command(b'perfancestors', formatteropts)
688 @command(b'perfancestors', formatteropts)
689 def perfancestors(ui, repo, **opts):
689 def perfancestors(ui, repo, **opts):
690 opts = _byteskwargs(opts)
690 opts = _byteskwargs(opts)
691 timer, fm = gettimer(ui, opts)
691 timer, fm = gettimer(ui, opts)
692 heads = repo.changelog.headrevs()
692 heads = repo.changelog.headrevs()
693 def d():
693 def d():
694 for a in repo.changelog.ancestors(heads):
694 for a in repo.changelog.ancestors(heads):
695 pass
695 pass
696 timer(d)
696 timer(d)
697 fm.end()
697 fm.end()
698
698
699 @command(b'perfancestorset', formatteropts)
699 @command(b'perfancestorset', formatteropts)
700 def perfancestorset(ui, repo, revset, **opts):
700 def perfancestorset(ui, repo, revset, **opts):
701 opts = _byteskwargs(opts)
701 opts = _byteskwargs(opts)
702 timer, fm = gettimer(ui, opts)
702 timer, fm = gettimer(ui, opts)
703 revs = repo.revs(revset)
703 revs = repo.revs(revset)
704 heads = repo.changelog.headrevs()
704 heads = repo.changelog.headrevs()
705 def d():
705 def d():
706 s = repo.changelog.ancestors(heads)
706 s = repo.changelog.ancestors(heads)
707 for rev in revs:
707 for rev in revs:
708 rev in s
708 rev in s
709 timer(d)
709 timer(d)
710 fm.end()
710 fm.end()
711
711
712 @command(b'perfdiscovery', formatteropts, b'PATH')
712 @command(b'perfdiscovery', formatteropts, b'PATH')
713 def perfdiscovery(ui, repo, path, **opts):
713 def perfdiscovery(ui, repo, path, **opts):
714 """benchmark discovery between local repo and the peer at given path
714 """benchmark discovery between local repo and the peer at given path
715 """
715 """
716 repos = [repo, None]
716 repos = [repo, None]
717 timer, fm = gettimer(ui, opts)
717 timer, fm = gettimer(ui, opts)
718 path = ui.expandpath(path)
718 path = ui.expandpath(path)
719
719
720 def s():
720 def s():
721 repos[1] = hg.peer(ui, opts, path)
721 repos[1] = hg.peer(ui, opts, path)
722 def d():
722 def d():
723 setdiscovery.findcommonheads(ui, *repos)
723 setdiscovery.findcommonheads(ui, *repos)
724 timer(d, setup=s)
724 timer(d, setup=s)
725 fm.end()
725 fm.end()
726
726
727 @command(b'perfbookmarks', formatteropts +
727 @command(b'perfbookmarks', formatteropts +
728 [
728 [
729 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
729 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
730 ])
730 ])
731 def perfbookmarks(ui, repo, **opts):
731 def perfbookmarks(ui, repo, **opts):
732 """benchmark parsing bookmarks from disk to memory"""
732 """benchmark parsing bookmarks from disk to memory"""
733 opts = _byteskwargs(opts)
733 opts = _byteskwargs(opts)
734 timer, fm = gettimer(ui, opts)
734 timer, fm = gettimer(ui, opts)
735
735
736 clearrevlogs = opts[b'clear_revlogs']
736 clearrevlogs = opts[b'clear_revlogs']
737 def s():
737 def s():
738 if clearrevlogs:
738 if clearrevlogs:
739 clearchangelog(repo)
739 clearchangelog(repo)
740 clearfilecache(repo, b'_bookmarks')
740 clearfilecache(repo, b'_bookmarks')
741 def d():
741 def d():
742 repo._bookmarks
742 repo._bookmarks
743 timer(d, setup=s)
743 timer(d, setup=s)
744 fm.end()
744 fm.end()
745
745
746 @command(b'perfbundleread', formatteropts, b'BUNDLE')
746 @command(b'perfbundleread', formatteropts, b'BUNDLE')
747 def perfbundleread(ui, repo, bundlepath, **opts):
747 def perfbundleread(ui, repo, bundlepath, **opts):
748 """Benchmark reading of bundle files.
748 """Benchmark reading of bundle files.
749
749
750 This command is meant to isolate the I/O part of bundle reading as
750 This command is meant to isolate the I/O part of bundle reading as
751 much as possible.
751 much as possible.
752 """
752 """
753 from mercurial import (
753 from mercurial import (
754 bundle2,
754 bundle2,
755 exchange,
755 exchange,
756 streamclone,
756 streamclone,
757 )
757 )
758
758
759 opts = _byteskwargs(opts)
759 opts = _byteskwargs(opts)
760
760
761 def makebench(fn):
761 def makebench(fn):
762 def run():
762 def run():
763 with open(bundlepath, b'rb') as fh:
763 with open(bundlepath, b'rb') as fh:
764 bundle = exchange.readbundle(ui, fh, bundlepath)
764 bundle = exchange.readbundle(ui, fh, bundlepath)
765 fn(bundle)
765 fn(bundle)
766
766
767 return run
767 return run
768
768
769 def makereadnbytes(size):
769 def makereadnbytes(size):
770 def run():
770 def run():
771 with open(bundlepath, b'rb') as fh:
771 with open(bundlepath, b'rb') as fh:
772 bundle = exchange.readbundle(ui, fh, bundlepath)
772 bundle = exchange.readbundle(ui, fh, bundlepath)
773 while bundle.read(size):
773 while bundle.read(size):
774 pass
774 pass
775
775
776 return run
776 return run
777
777
778 def makestdioread(size):
778 def makestdioread(size):
779 def run():
779 def run():
780 with open(bundlepath, b'rb') as fh:
780 with open(bundlepath, b'rb') as fh:
781 while fh.read(size):
781 while fh.read(size):
782 pass
782 pass
783
783
784 return run
784 return run
785
785
786 # bundle1
786 # bundle1
787
787
788 def deltaiter(bundle):
788 def deltaiter(bundle):
789 for delta in bundle.deltaiter():
789 for delta in bundle.deltaiter():
790 pass
790 pass
791
791
792 def iterchunks(bundle):
792 def iterchunks(bundle):
793 for chunk in bundle.getchunks():
793 for chunk in bundle.getchunks():
794 pass
794 pass
795
795
796 # bundle2
796 # bundle2
797
797
798 def forwardchunks(bundle):
798 def forwardchunks(bundle):
799 for chunk in bundle._forwardchunks():
799 for chunk in bundle._forwardchunks():
800 pass
800 pass
801
801
802 def iterparts(bundle):
802 def iterparts(bundle):
803 for part in bundle.iterparts():
803 for part in bundle.iterparts():
804 pass
804 pass
805
805
806 def iterpartsseekable(bundle):
806 def iterpartsseekable(bundle):
807 for part in bundle.iterparts(seekable=True):
807 for part in bundle.iterparts(seekable=True):
808 pass
808 pass
809
809
810 def seek(bundle):
810 def seek(bundle):
811 for part in bundle.iterparts(seekable=True):
811 for part in bundle.iterparts(seekable=True):
812 part.seek(0, os.SEEK_END)
812 part.seek(0, os.SEEK_END)
813
813
814 def makepartreadnbytes(size):
814 def makepartreadnbytes(size):
815 def run():
815 def run():
816 with open(bundlepath, b'rb') as fh:
816 with open(bundlepath, b'rb') as fh:
817 bundle = exchange.readbundle(ui, fh, bundlepath)
817 bundle = exchange.readbundle(ui, fh, bundlepath)
818 for part in bundle.iterparts():
818 for part in bundle.iterparts():
819 while part.read(size):
819 while part.read(size):
820 pass
820 pass
821
821
822 return run
822 return run
823
823
824 benches = [
824 benches = [
825 (makestdioread(8192), b'read(8k)'),
825 (makestdioread(8192), b'read(8k)'),
826 (makestdioread(16384), b'read(16k)'),
826 (makestdioread(16384), b'read(16k)'),
827 (makestdioread(32768), b'read(32k)'),
827 (makestdioread(32768), b'read(32k)'),
828 (makestdioread(131072), b'read(128k)'),
828 (makestdioread(131072), b'read(128k)'),
829 ]
829 ]
830
830
831 with open(bundlepath, b'rb') as fh:
831 with open(bundlepath, b'rb') as fh:
832 bundle = exchange.readbundle(ui, fh, bundlepath)
832 bundle = exchange.readbundle(ui, fh, bundlepath)
833
833
834 if isinstance(bundle, changegroup.cg1unpacker):
834 if isinstance(bundle, changegroup.cg1unpacker):
835 benches.extend([
835 benches.extend([
836 (makebench(deltaiter), b'cg1 deltaiter()'),
836 (makebench(deltaiter), b'cg1 deltaiter()'),
837 (makebench(iterchunks), b'cg1 getchunks()'),
837 (makebench(iterchunks), b'cg1 getchunks()'),
838 (makereadnbytes(8192), b'cg1 read(8k)'),
838 (makereadnbytes(8192), b'cg1 read(8k)'),
839 (makereadnbytes(16384), b'cg1 read(16k)'),
839 (makereadnbytes(16384), b'cg1 read(16k)'),
840 (makereadnbytes(32768), b'cg1 read(32k)'),
840 (makereadnbytes(32768), b'cg1 read(32k)'),
841 (makereadnbytes(131072), b'cg1 read(128k)'),
841 (makereadnbytes(131072), b'cg1 read(128k)'),
842 ])
842 ])
843 elif isinstance(bundle, bundle2.unbundle20):
843 elif isinstance(bundle, bundle2.unbundle20):
844 benches.extend([
844 benches.extend([
845 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
845 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
846 (makebench(iterparts), b'bundle2 iterparts()'),
846 (makebench(iterparts), b'bundle2 iterparts()'),
847 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
847 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
848 (makebench(seek), b'bundle2 part seek()'),
848 (makebench(seek), b'bundle2 part seek()'),
849 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
849 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
850 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
850 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
851 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
851 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
852 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
852 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
853 ])
853 ])
854 elif isinstance(bundle, streamclone.streamcloneapplier):
854 elif isinstance(bundle, streamclone.streamcloneapplier):
855 raise error.Abort(b'stream clone bundles not supported')
855 raise error.Abort(b'stream clone bundles not supported')
856 else:
856 else:
857 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
857 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
858
858
859 for fn, title in benches:
859 for fn, title in benches:
860 timer, fm = gettimer(ui, opts)
860 timer, fm = gettimer(ui, opts)
861 timer(fn, title=title)
861 timer(fn, title=title)
862 fm.end()
862 fm.end()
863
863
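The ``make*`` helpers above are closure factories: each call bakes a chunk size (or an unbundling function) into a zero-argument callable that the timer can invoke repeatedly. The same pattern, shown standalone against an ordinary file path (``path`` and ``make_chunked_reader`` are illustrative names):

    def make_chunked_reader(path, size):
        # returns a zero-argument callable, like makestdioread(size) above
        def run():
            with open(path, 'rb') as fh:
                while fh.read(size):
                    pass
        return run

    # e.g. benches = [(make_chunked_reader(path, 8192), 'read(8k)'), ...]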
864 @command(b'perfchangegroupchangelog', formatteropts +
864 @command(b'perfchangegroupchangelog', formatteropts +
865 [(b'', b'cgversion', b'02', b'changegroup version'),
865 [(b'', b'cgversion', b'02', b'changegroup version'),
866 (b'r', b'rev', b'', b'revisions to add to changegroup')])
866 (b'r', b'rev', b'', b'revisions to add to changegroup')])
867 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
867 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
868 """Benchmark producing a changelog group for a changegroup.
868 """Benchmark producing a changelog group for a changegroup.
869
869
870 This measures the time spent processing the changelog during a
870 This measures the time spent processing the changelog during a
871 bundle operation. This occurs during `hg bundle` and on a server
871 bundle operation. This occurs during `hg bundle` and on a server
872 processing a `getbundle` wire protocol request (handles clones
872 processing a `getbundle` wire protocol request (handles clones
873 and pull requests).
873 and pull requests).
874
874
875 By default, all revisions are added to the changegroup.
875 By default, all revisions are added to the changegroup.
876 """
876 """
877 opts = _byteskwargs(opts)
877 opts = _byteskwargs(opts)
878 cl = repo.changelog
878 cl = repo.changelog
879 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
879 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
880 bundler = changegroup.getbundler(cgversion, repo)
880 bundler = changegroup.getbundler(cgversion, repo)
881
881
882 def d():
882 def d():
883 state, chunks = bundler._generatechangelog(cl, nodes)
883 state, chunks = bundler._generatechangelog(cl, nodes)
884 for chunk in chunks:
884 for chunk in chunks:
885 pass
885 pass
886
886
887 timer, fm = gettimer(ui, opts)
887 timer, fm = gettimer(ui, opts)
888
888
889 # Terminal printing can interfere with timing. So disable it.
889 # Terminal printing can interfere with timing. So disable it.
890 with ui.configoverride({(b'progress', b'disable'): True}):
890 with ui.configoverride({(b'progress', b'disable'): True}):
891 timer(d)
891 timer(d)
892
892
893 fm.end()
893 fm.end()
894
894
895 @command(b'perfdirs', formatteropts)
895 @command(b'perfdirs', formatteropts)
896 def perfdirs(ui, repo, **opts):
896 def perfdirs(ui, repo, **opts):
897 opts = _byteskwargs(opts)
897 opts = _byteskwargs(opts)
898 timer, fm = gettimer(ui, opts)
898 timer, fm = gettimer(ui, opts)
899 dirstate = repo.dirstate
899 dirstate = repo.dirstate
900 b'a' in dirstate
900 b'a' in dirstate
901 def d():
901 def d():
902 dirstate.hasdir(b'a')
902 dirstate.hasdir(b'a')
903 del dirstate._map._dirs
903 del dirstate._map._dirs
904 timer(d)
904 timer(d)
905 fm.end()
905 fm.end()
906
906
907 @command(b'perfdirstate', formatteropts)
907 @command(b'perfdirstate', formatteropts)
908 def perfdirstate(ui, repo, **opts):
908 def perfdirstate(ui, repo, **opts):
909 opts = _byteskwargs(opts)
909 opts = _byteskwargs(opts)
910 timer, fm = gettimer(ui, opts)
910 timer, fm = gettimer(ui, opts)
911 b"a" in repo.dirstate
911 b"a" in repo.dirstate
912 def d():
912 def d():
913 repo.dirstate.invalidate()
913 repo.dirstate.invalidate()
914 b"a" in repo.dirstate
914 b"a" in repo.dirstate
915 timer(d)
915 timer(d)
916 fm.end()
916 fm.end()
917
917
918 @command(b'perfdirstatedirs', formatteropts)
918 @command(b'perfdirstatedirs', formatteropts)
919 def perfdirstatedirs(ui, repo, **opts):
919 def perfdirstatedirs(ui, repo, **opts):
920 opts = _byteskwargs(opts)
920 opts = _byteskwargs(opts)
921 timer, fm = gettimer(ui, opts)
921 timer, fm = gettimer(ui, opts)
922 b"a" in repo.dirstate
922 b"a" in repo.dirstate
923 def d():
923 def d():
924 repo.dirstate.hasdir(b"a")
924 repo.dirstate.hasdir(b"a")
925 del repo.dirstate._map._dirs
925 del repo.dirstate._map._dirs
926 timer(d)
926 timer(d)
927 fm.end()
927 fm.end()
928
928
929 @command(b'perfdirstatefoldmap', formatteropts)
929 @command(b'perfdirstatefoldmap', formatteropts)
930 def perfdirstatefoldmap(ui, repo, **opts):
930 def perfdirstatefoldmap(ui, repo, **opts):
931 opts = _byteskwargs(opts)
931 opts = _byteskwargs(opts)
932 timer, fm = gettimer(ui, opts)
932 timer, fm = gettimer(ui, opts)
933 dirstate = repo.dirstate
933 dirstate = repo.dirstate
934 b'a' in dirstate
934 b'a' in dirstate
935 def d():
935 def d():
936 dirstate._map.filefoldmap.get(b'a')
936 dirstate._map.filefoldmap.get(b'a')
937 del dirstate._map.filefoldmap
937 del dirstate._map.filefoldmap
938 timer(d)
938 timer(d)
939 fm.end()
939 fm.end()
940
940
941 @command(b'perfdirfoldmap', formatteropts)
941 @command(b'perfdirfoldmap', formatteropts)
942 def perfdirfoldmap(ui, repo, **opts):
942 def perfdirfoldmap(ui, repo, **opts):
943 opts = _byteskwargs(opts)
943 opts = _byteskwargs(opts)
944 timer, fm = gettimer(ui, opts)
944 timer, fm = gettimer(ui, opts)
945 dirstate = repo.dirstate
945 dirstate = repo.dirstate
946 b'a' in dirstate
946 b'a' in dirstate
947 def d():
947 def d():
948 dirstate._map.dirfoldmap.get(b'a')
948 dirstate._map.dirfoldmap.get(b'a')
949 del dirstate._map.dirfoldmap
949 del dirstate._map.dirfoldmap
950 del dirstate._map._dirs
950 del dirstate._map._dirs
951 timer(d)
951 timer(d)
952 fm.end()
952 fm.end()
953
953
954 @command(b'perfdirstatewrite', formatteropts)
954 @command(b'perfdirstatewrite', formatteropts)
955 def perfdirstatewrite(ui, repo, **opts):
955 def perfdirstatewrite(ui, repo, **opts):
956 opts = _byteskwargs(opts)
956 opts = _byteskwargs(opts)
957 timer, fm = gettimer(ui, opts)
957 timer, fm = gettimer(ui, opts)
958 ds = repo.dirstate
958 ds = repo.dirstate
959 b"a" in ds
959 b"a" in ds
960 def d():
960 def d():
961 ds._dirty = True
961 ds._dirty = True
962 ds.write(repo.currenttransaction())
962 ds.write(repo.currenttransaction())
963 timer(d)
963 timer(d)
964 fm.end()
964 fm.end()
965
965
966 @command(b'perfmergecalculate',
966 @command(b'perfmergecalculate',
967 [
967 [
968 (b'r', b'rev', b'.', b'rev to merge against'),
968 (b'r', b'rev', b'.', b'rev to merge against'),
969 (b'', b'from', b'', b'rev to merge from'),
969 (b'', b'from', b'', b'rev to merge from'),
970 (b'', b'base', b'', b'the revision to use as base'),
970 ] + formatteropts)
971 ] + formatteropts)
971 def perfmergecalculate(ui, repo, rev, **opts):
972 def perfmergecalculate(ui, repo, rev, **opts):
972 opts = _byteskwargs(opts)
973 opts = _byteskwargs(opts)
973 timer, fm = gettimer(ui, opts)
974 timer, fm = gettimer(ui, opts)
974
975
975 if opts['from']:
976 if opts['from']:
976 fromrev = scmutil.revsingle(repo, opts['from'])
977 fromrev = scmutil.revsingle(repo, opts['from'])
977 wctx = repo[fromrev]
978 wctx = repo[fromrev]
978 else:
979 else:
979 wctx = repo[None]
980 wctx = repo[None]
980 # we don't want working dir files to be stat'd in the benchmark, so
981 # we don't want working dir files to be stat'd in the benchmark, so
981 # prime that cache
982 # prime that cache
982 wctx.dirty()
983 wctx.dirty()
983 rctx = scmutil.revsingle(repo, rev, rev)
984 rctx = scmutil.revsingle(repo, rev, rev)
985 if opts['base']:
986 fromrev = scmutil.revsingle(repo, opts['base'])
987 ancestor = repo[fromrev]
988 else:
984 ancestor = wctx.ancestor(rctx)
989 ancestor = wctx.ancestor(rctx)
985 def d():
990 def d():
986 # acceptremote is True because we don't want prompts in the middle of
991 # acceptremote is True because we don't want prompts in the middle of
987 # our benchmark
992 # our benchmark
988 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
993 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
989 acceptremote=True, followcopies=True)
994 acceptremote=True, followcopies=True)
990 timer(d)
995 timer(d)
991 fm.end()
996 fm.end()
992
1004
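With the option table above, the command can presumably be invoked as ``hg perfmergecalculate --rev REV``, optionally adding ``--from`` and the new ``--base``. The sketch below (placeholder revision specs, not part of the patch) shows the context selection performed before the timed ``calculateupdates()`` call:

# Sketch: how the merge inputs are resolved, mirroring the code above.
from mercurial import scmutil

rev, base = b'default', b'.^'              # hypothetical revision specs
wctx = repo[None]                          # working context (no --from given)
rctx = scmutil.revsingle(repo, rev, rev)   # revision to merge against
if base:
    fromrev = scmutil.revsingle(repo, base)
    ancestor = repo[fromrev]               # explicit merge base
else:
    ancestor = wctx.ancestor(rctx)         # computed common ancestor
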
993 @command(b'perfpathcopies', [], b"REV REV")
1005 @command(b'perfpathcopies', [], b"REV REV")
994 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1006 def perfpathcopies(ui, repo, rev1, rev2, **opts):
995 """benchmark the copy tracing logic"""
1007 """benchmark the copy tracing logic"""
996 opts = _byteskwargs(opts)
1008 opts = _byteskwargs(opts)
997 timer, fm = gettimer(ui, opts)
1009 timer, fm = gettimer(ui, opts)
998 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1010 ctx1 = scmutil.revsingle(repo, rev1, rev1)
999 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1011 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1000 def d():
1012 def d():
1001 copies.pathcopies(ctx1, ctx2)
1013 copies.pathcopies(ctx1, ctx2)
1002 timer(d)
1014 timer(d)
1003 fm.end()
1015 fm.end()
1004
1016
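A tiny sketch of the copy-tracing call being timed, runnable on its own against two placeholder revisions; candidate pairs can be found with `perfhelper-pathcopies` further down:

# Sketch: trace file copies between two revisions, outside the benchmark.
from mercurial import copies, scmutil

ctx1 = scmutil.revsingle(repo, b'REV1', b'REV1')   # hypothetical revisions
ctx2 = scmutil.revsingle(repo, b'REV2', b'REV2')
renames = copies.pathcopies(ctx1, ctx2)   # maps destination path -> source path
print(len(renames))
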
1005 @command(b'perfphases',
1017 @command(b'perfphases',
1006 [(b'', b'full', False, b'include file reading time too'),
1018 [(b'', b'full', False, b'include file reading time too'),
1007 ], b"")
1019 ], b"")
1008 def perfphases(ui, repo, **opts):
1020 def perfphases(ui, repo, **opts):
1009 """benchmark phasesets computation"""
1021 """benchmark phasesets computation"""
1010 opts = _byteskwargs(opts)
1022 opts = _byteskwargs(opts)
1011 timer, fm = gettimer(ui, opts)
1023 timer, fm = gettimer(ui, opts)
1012 _phases = repo._phasecache
1024 _phases = repo._phasecache
1013 full = opts.get(b'full')
1025 full = opts.get(b'full')
1014 def d():
1026 def d():
1015 phases = _phases
1027 phases = _phases
1016 if full:
1028 if full:
1017 clearfilecache(repo, b'_phasecache')
1029 clearfilecache(repo, b'_phasecache')
1018 phases = repo._phasecache
1030 phases = repo._phasecache
1019 phases.invalidate()
1031 phases.invalidate()
1020 phases.loadphaserevs(repo)
1032 phases.loadphaserevs(repo)
1021 timer(d)
1033 timer(d)
1022 fm.end()
1034 fm.end()
1023
1035
1024 @command(b'perfphasesremote',
1036 @command(b'perfphasesremote',
1025 [], b"[DEST]")
1037 [], b"[DEST]")
1026 def perfphasesremote(ui, repo, dest=None, **opts):
1038 def perfphasesremote(ui, repo, dest=None, **opts):
1027 """benchmark time needed to analyse phases of the remote server"""
1039 """benchmark time needed to analyse phases of the remote server"""
1028 from mercurial.node import (
1040 from mercurial.node import (
1029 bin,
1041 bin,
1030 )
1042 )
1031 from mercurial import (
1043 from mercurial import (
1032 exchange,
1044 exchange,
1033 hg,
1045 hg,
1034 phases,
1046 phases,
1035 )
1047 )
1036 opts = _byteskwargs(opts)
1048 opts = _byteskwargs(opts)
1037 timer, fm = gettimer(ui, opts)
1049 timer, fm = gettimer(ui, opts)
1038
1050
1039 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
1051 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
1040 if not path:
1052 if not path:
1041 raise error.Abort((b'default repository not configured!'),
1053 raise error.Abort((b'default repository not configured!'),
1042 hint=(b"see 'hg help config.paths'"))
1054 hint=(b"see 'hg help config.paths'"))
1043 dest = path.pushloc or path.loc
1055 dest = path.pushloc or path.loc
1044 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
1056 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
1045 other = hg.peer(repo, opts, dest)
1057 other = hg.peer(repo, opts, dest)
1046
1058
1047 # easier to perform discovery through the operation
1059 # easier to perform discovery through the operation
1048 op = exchange.pushoperation(repo, other)
1060 op = exchange.pushoperation(repo, other)
1049 exchange._pushdiscoverychangeset(op)
1061 exchange._pushdiscoverychangeset(op)
1050
1062
1051 remotesubset = op.fallbackheads
1063 remotesubset = op.fallbackheads
1052
1064
1053 with other.commandexecutor() as e:
1065 with other.commandexecutor() as e:
1054 remotephases = e.callcommand(b'listkeys',
1066 remotephases = e.callcommand(b'listkeys',
1055 {b'namespace': b'phases'}).result()
1067 {b'namespace': b'phases'}).result()
1056 del other
1068 del other
1057 publishing = remotephases.get(b'publishing', False)
1069 publishing = remotephases.get(b'publishing', False)
1058 if publishing:
1070 if publishing:
1059 ui.status((b'publishing: yes\n'))
1071 ui.status((b'publishing: yes\n'))
1060 else:
1072 else:
1061 ui.status((b'publishing: no\n'))
1073 ui.status((b'publishing: no\n'))
1062
1074
1063 nodemap = repo.changelog.nodemap
1075 nodemap = repo.changelog.nodemap
1064 nonpublishroots = 0
1076 nonpublishroots = 0
1065 for nhex, phase in remotephases.iteritems():
1077 for nhex, phase in remotephases.iteritems():
1066 if nhex == b'publishing': # ignore data related to publish option
1078 if nhex == b'publishing': # ignore data related to publish option
1067 continue
1079 continue
1068 node = bin(nhex)
1080 node = bin(nhex)
1069 if node in nodemap and int(phase):
1081 if node in nodemap and int(phase):
1070 nonpublishroots += 1
1082 nonpublishroots += 1
1071 ui.status((b'number of roots: %d\n') % len(remotephases))
1083 ui.status((b'number of roots: %d\n') % len(remotephases))
1072 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
1084 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
1073 def d():
1085 def d():
1074 phases.remotephasessummary(repo,
1086 phases.remotephasessummary(repo,
1075 remotesubset,
1087 remotesubset,
1076 remotephases)
1088 remotephases)
1077 timer(d)
1089 timer(d)
1078 fm.end()
1090 fm.end()
1079
1091
1080 @command(b'perfmanifest',[
1092 @command(b'perfmanifest',[
1081 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1093 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1082 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1094 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1083 ] + formatteropts, b'REV|NODE')
1095 ] + formatteropts, b'REV|NODE')
1084 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1096 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1085 """benchmark the time to read a manifest from disk and return a usable
1097 """benchmark the time to read a manifest from disk and return a usable
1086 dict-like object
1098 dict-like object
1087
1099
1088 Manifest caches are cleared before retrieval."""
1100 Manifest caches are cleared before retrieval."""
1089 opts = _byteskwargs(opts)
1101 opts = _byteskwargs(opts)
1090 timer, fm = gettimer(ui, opts)
1102 timer, fm = gettimer(ui, opts)
1091 if not manifest_rev:
1103 if not manifest_rev:
1092 ctx = scmutil.revsingle(repo, rev, rev)
1104 ctx = scmutil.revsingle(repo, rev, rev)
1093 t = ctx.manifestnode()
1105 t = ctx.manifestnode()
1094 else:
1106 else:
1095 from mercurial.node import bin
1107 from mercurial.node import bin
1096
1108
1097 if len(rev) == 40:
1109 if len(rev) == 40:
1098 t = bin(rev)
1110 t = bin(rev)
1099 else:
1111 else:
1100 try:
1112 try:
1101 rev = int(rev)
1113 rev = int(rev)
1102
1114
1103 if util.safehasattr(repo.manifestlog, b'getstorage'):
1115 if util.safehasattr(repo.manifestlog, b'getstorage'):
1104 t = repo.manifestlog.getstorage(b'').node(rev)
1116 t = repo.manifestlog.getstorage(b'').node(rev)
1105 else:
1117 else:
1106 t = repo.manifestlog._revlog.lookup(rev)
1118 t = repo.manifestlog._revlog.lookup(rev)
1107 except ValueError:
1119 except ValueError:
1108 raise error.Abort(b'manifest revision must be integer or full '
1120 raise error.Abort(b'manifest revision must be integer or full '
1109 b'node')
1121 b'node')
1110 def d():
1122 def d():
1111 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1123 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1112 repo.manifestlog[t].read()
1124 repo.manifestlog[t].read()
1113 timer(d)
1125 timer(d)
1114 fm.end()
1126 fm.end()
1115
1127
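A sketch of the cold manifest read the benchmark exercises, assuming an open `repo`; `--clear-disk` corresponds to passing `clear_persisted_data=True` below:

# Sketch: drop manifest caches, then read one manifest as d() does above.
from mercurial import scmutil

node = scmutil.revsingle(repo, b'tip').manifestnode()
repo.manifestlog.clearcaches(clear_persisted_data=False)  # keep on-disk caches
mf = repo.manifestlog[node].read()                        # dict-like manifest
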
1116 @command(b'perfchangeset', formatteropts)
1128 @command(b'perfchangeset', formatteropts)
1117 def perfchangeset(ui, repo, rev, **opts):
1129 def perfchangeset(ui, repo, rev, **opts):
1118 opts = _byteskwargs(opts)
1130 opts = _byteskwargs(opts)
1119 timer, fm = gettimer(ui, opts)
1131 timer, fm = gettimer(ui, opts)
1120 n = scmutil.revsingle(repo, rev).node()
1132 n = scmutil.revsingle(repo, rev).node()
1121 def d():
1133 def d():
1122 repo.changelog.read(n)
1134 repo.changelog.read(n)
1123 #repo.changelog._cache = None
1135 #repo.changelog._cache = None
1124 timer(d)
1136 timer(d)
1125 fm.end()
1137 fm.end()
1126
1138
1127 @command(b'perfignore', formatteropts)
1139 @command(b'perfignore', formatteropts)
1128 def perfignore(ui, repo, **opts):
1140 def perfignore(ui, repo, **opts):
1129 """benchmark operation related to computing ignore"""
1141 """benchmark operation related to computing ignore"""
1130 opts = _byteskwargs(opts)
1142 opts = _byteskwargs(opts)
1131 timer, fm = gettimer(ui, opts)
1143 timer, fm = gettimer(ui, opts)
1132 dirstate = repo.dirstate
1144 dirstate = repo.dirstate
1133
1145
1134 def setupone():
1146 def setupone():
1135 dirstate.invalidate()
1147 dirstate.invalidate()
1136 clearfilecache(dirstate, b'_ignore')
1148 clearfilecache(dirstate, b'_ignore')
1137
1149
1138 def runone():
1150 def runone():
1139 dirstate._ignore
1151 dirstate._ignore
1140
1152
1141 timer(runone, setup=setupone, title=b"load")
1153 timer(runone, setup=setupone, title=b"load")
1142 fm.end()
1154 fm.end()
1143
1155
1144 @command(b'perfindex', [
1156 @command(b'perfindex', [
1145 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1157 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1146 (b'', b'no-lookup', None, b'do not perform a revision lookup after creation'),
1158 (b'', b'no-lookup', None, b'do not perform a revision lookup after creation'),
1147 ] + formatteropts)
1159 ] + formatteropts)
1148 def perfindex(ui, repo, **opts):
1160 def perfindex(ui, repo, **opts):
1149 """benchmark index creation time followed by a lookup
1161 """benchmark index creation time followed by a lookup
1150
1162
1151 The default is to look `tip` up. Depending on the index implementation,
1163 The default is to look `tip` up. Depending on the index implementation,
1152 the revision looked up can matter. For example, an implementation
1164 the revision looked up can matter. For example, an implementation
1153 scanning the index will have a faster lookup time for `--rev tip` than for
1165 scanning the index will have a faster lookup time for `--rev tip` than for
1154 `--rev 0`. The number of looked up revisions and their order can also
1166 `--rev 0`. The number of looked up revisions and their order can also
1155 matter.
1167 matter.
1156
1168
1157 Examples of useful sets to test:
1169 Examples of useful sets to test:
1158 * tip
1170 * tip
1159 * 0
1171 * 0
1160 * -10:
1172 * -10:
1161 * :10
1173 * :10
1162 * -10: + :10
1174 * -10: + :10
1163 * :10: + -10:
1175 * :10: + -10:
1164 * -10000:
1176 * -10000:
1165 * -10000: + 0
1177 * -10000: + 0
1166
1178
1167 It is not currently possible to check for lookup of a missing node. For
1179 It is not currently possible to check for lookup of a missing node. For
1168 deeper lookup benchmarking, check out the `perfnodemap` command."""
1180 deeper lookup benchmarking, check out the `perfnodemap` command."""
1169 import mercurial.revlog
1181 import mercurial.revlog
1170 opts = _byteskwargs(opts)
1182 opts = _byteskwargs(opts)
1171 timer, fm = gettimer(ui, opts)
1183 timer, fm = gettimer(ui, opts)
1172 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1184 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1173 if opts[b'no_lookup']:
1185 if opts[b'no_lookup']:
1174 if opts['rev']:
1186 if opts['rev']:
1175 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1187 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1176 nodes = []
1188 nodes = []
1177 elif not opts[b'rev']:
1189 elif not opts[b'rev']:
1178 nodes = [repo[b"tip"].node()]
1190 nodes = [repo[b"tip"].node()]
1179 else:
1191 else:
1180 revs = scmutil.revrange(repo, opts[b'rev'])
1192 revs = scmutil.revrange(repo, opts[b'rev'])
1181 cl = repo.changelog
1193 cl = repo.changelog
1182 nodes = [cl.node(r) for r in revs]
1194 nodes = [cl.node(r) for r in revs]
1183
1195
1184 unfi = repo.unfiltered()
1196 unfi = repo.unfiltered()
1185 # find the filecache func directly
1197 # find the filecache func directly
1186 # This avoids polluting the benchmark with the filecache logic
1198 # This avoids polluting the benchmark with the filecache logic
1187 makecl = unfi.__class__.changelog.func
1199 makecl = unfi.__class__.changelog.func
1188 def setup():
1200 def setup():
1189 # probably not necessary, but for good measure
1201 # probably not necessary, but for good measure
1190 clearchangelog(unfi)
1202 clearchangelog(unfi)
1191 def d():
1203 def d():
1192 cl = makecl(unfi)
1204 cl = makecl(unfi)
1193 for n in nodes:
1205 for n in nodes:
1194 cl.rev(n)
1206 cl.rev(n)
1195 timer(d, setup=setup)
1207 timer(d, setup=setup)
1196 fm.end()
1208 fm.end()
1197
1209
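Given the repeatable `--rev` option and the `--no-lookup` flag above, runs such as ``hg perfindex --rev tip``, ``hg perfindex --rev '-10000:' --rev 0`` or ``hg perfindex --no-lookup`` presumably cover the revsets suggested in the docstring. The timed body boils down to the following sketch:

# Sketch: rebuild the changelog index and resolve one node, as d() does above.
unfi = repo.unfiltered()
makecl = unfi.__class__.changelog.func   # bypass the filecache wrapper
cl = makecl(unfi)                        # fresh index construction
cl.rev(repo[b"tip"].node())              # a single lookup against the new index
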
1198 @command(b'perfnodemap', [
1210 @command(b'perfnodemap', [
1199 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1211 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1200 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1212 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1201 ] + formatteropts)
1213 ] + formatteropts)
1202 def perfnodemap(ui, repo, **opts):
1214 def perfnodemap(ui, repo, **opts):
1203 """benchmark the time necessary to look up revision from a cold nodemap
1215 """benchmark the time necessary to look up revision from a cold nodemap
1204
1216
1205 Depending on the implementation, the number and order of revisions we look
1217 Depending on the implementation, the number and order of revisions we look
1206 up can vary. Examples of useful sets to test:
1218 up can vary. Examples of useful sets to test:
1207 * tip
1219 * tip
1208 * 0
1220 * 0
1209 * -10:
1221 * -10:
1210 * :10
1222 * :10
1211 * -10: + :10
1223 * -10: + :10
1212 * :10: + -10:
1224 * :10: + -10:
1213 * -10000:
1225 * -10000:
1214 * -10000: + 0
1226 * -10000: + 0
1215
1227
1216 The command currently focuses on valid binary lookup. Benchmarking for
1228 The command currently focuses on valid binary lookup. Benchmarking for
1217 hexlookup, prefix lookup and missing lookup would also be valuable.
1229 hexlookup, prefix lookup and missing lookup would also be valuable.
1218 """
1230 """
1219 import mercurial.revlog
1231 import mercurial.revlog
1220 opts = _byteskwargs(opts)
1232 opts = _byteskwargs(opts)
1221 timer, fm = gettimer(ui, opts)
1233 timer, fm = gettimer(ui, opts)
1222 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1234 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1223
1235
1224 unfi = repo.unfiltered()
1236 unfi = repo.unfiltered()
1225 clearcaches = opts['clear_caches']
1237 clearcaches = opts['clear_caches']
1226 # find the filecache func directly
1238 # find the filecache func directly
1227 # This avoids polluting the benchmark with the filecache logic
1239 # This avoids polluting the benchmark with the filecache logic
1228 makecl = unfi.__class__.changelog.func
1240 makecl = unfi.__class__.changelog.func
1229 if not opts[b'rev']:
1241 if not opts[b'rev']:
1230 raise error.Abort('use --rev to specify revisions to look up')
1242 raise error.Abort('use --rev to specify revisions to look up')
1231 revs = scmutil.revrange(repo, opts[b'rev'])
1243 revs = scmutil.revrange(repo, opts[b'rev'])
1232 cl = repo.changelog
1244 cl = repo.changelog
1233 nodes = [cl.node(r) for r in revs]
1245 nodes = [cl.node(r) for r in revs]
1234
1246
1235 # use a list to pass reference to a nodemap from one closure to the next
1247 # use a list to pass reference to a nodemap from one closure to the next
1236 nodeget = [None]
1248 nodeget = [None]
1237 def setnodeget():
1249 def setnodeget():
1238 # probably not necessary, but for good measure
1250 # probably not necessary, but for good measure
1239 clearchangelog(unfi)
1251 clearchangelog(unfi)
1240 nodeget[0] = makecl(unfi).nodemap.get
1252 nodeget[0] = makecl(unfi).nodemap.get
1241
1253
1242 def d():
1254 def d():
1243 get = nodeget[0]
1255 get = nodeget[0]
1244 for n in nodes:
1256 for n in nodes:
1245 get(n)
1257 get(n)
1246
1258
1247 setup = None
1259 setup = None
1248 if clearcaches:
1260 if clearcaches:
1249 def setup():
1261 def setup():
1250 setnodeget()
1262 setnodeget()
1251 else:
1263 else:
1252 setnodeget()
1264 setnodeget()
1253 d() # prewarm the data structure
1265 d() # prewarm the data structure
1254 timer(d, setup=setup)
1266 timer(d, setup=setup)
1255 fm.end()
1267 fm.end()
1256
1268
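Since `--rev` is mandatory here, an invocation presumably looks like ``hg perfnodemap --rev tip --rev 0``. The cold-lookup path being timed is roughly:

# Sketch: cold nodemap lookup, mirroring the setnodeget()/d() pair above.
unfi = repo.unfiltered()
clearchangelog(unfi)              # cache-clearing helper defined earlier in perf.py
get = unfi.__class__.changelog.func(unfi).nodemap.get
get(repo.changelog.node(0))       # look up one binary node
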
1257 @command(b'perfstartup', formatteropts)
1269 @command(b'perfstartup', formatteropts)
1258 def perfstartup(ui, repo, **opts):
1270 def perfstartup(ui, repo, **opts):
1259 opts = _byteskwargs(opts)
1271 opts = _byteskwargs(opts)
1260 timer, fm = gettimer(ui, opts)
1272 timer, fm = gettimer(ui, opts)
1261 def d():
1273 def d():
1262 if os.name != r'nt':
1274 if os.name != r'nt':
1263 os.system(b"HGRCPATH= %s version -q > /dev/null" %
1275 os.system(b"HGRCPATH= %s version -q > /dev/null" %
1264 fsencode(sys.argv[0]))
1276 fsencode(sys.argv[0]))
1265 else:
1277 else:
1266 os.environ[r'HGRCPATH'] = r' '
1278 os.environ[r'HGRCPATH'] = r' '
1267 os.system(r"%s version -q > NUL" % sys.argv[0])
1279 os.system(r"%s version -q > NUL" % sys.argv[0])
1268 timer(d)
1280 timer(d)
1269 fm.end()
1281 fm.end()
1270
1282
1271 @command(b'perfparents', formatteropts)
1283 @command(b'perfparents', formatteropts)
1272 def perfparents(ui, repo, **opts):
1284 def perfparents(ui, repo, **opts):
1273 """benchmark the time necessary to fetch one changeset's parents.
1285 """benchmark the time necessary to fetch one changeset's parents.
1274
1286
1275 The fetch is done using the `node identifier`, traversing all object layers
1287 The fetch is done using the `node identifier`, traversing all object layers
1276 from the repository object. The first N revisions will be used for this
1288 from the repository object. The first N revisions will be used for this
1277 benchmark. N is controlled by the ``perf.parentscount`` config option
1289 benchmark. N is controlled by the ``perf.parentscount`` config option
1278 (default: 1000).
1290 (default: 1000).
1279 """
1291 """
1280 opts = _byteskwargs(opts)
1292 opts = _byteskwargs(opts)
1281 timer, fm = gettimer(ui, opts)
1293 timer, fm = gettimer(ui, opts)
1282 # control the number of commits perfparents iterates over
1294 # control the number of commits perfparents iterates over
1283 # experimental config: perf.parentscount
1295 # experimental config: perf.parentscount
1284 count = getint(ui, b"perf", b"parentscount", 1000)
1296 count = getint(ui, b"perf", b"parentscount", 1000)
1285 if len(repo.changelog) < count:
1297 if len(repo.changelog) < count:
1286 raise error.Abort(b"repo needs %d commits for this test" % count)
1298 raise error.Abort(b"repo needs %d commits for this test" % count)
1287 repo = repo.unfiltered()
1299 repo = repo.unfiltered()
1288 nl = [repo.changelog.node(i) for i in _xrange(count)]
1300 nl = [repo.changelog.node(i) for i in _xrange(count)]
1289 def d():
1301 def d():
1290 for n in nl:
1302 for n in nl:
1291 repo.changelog.parents(n)
1303 repo.changelog.parents(n)
1292 timer(d)
1304 timer(d)
1293 fm.end()
1305 fm.end()
1294
1306
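The revision count can presumably be tuned per run with Mercurial's global `--config` flag, e.g. ``hg perfparents --config perf.parentscount=500``. The timed loop amounts to:

# Sketch: fetch parents by node identifier for the first N revisions.
count = 1000                      # matches the perf.parentscount default
unfi = repo.unfiltered()
nl = [unfi.changelog.node(i) for i in range(count)]
for n in nl:
    unfi.changelog.parents(n)
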
1295 @command(b'perfctxfiles', formatteropts)
1307 @command(b'perfctxfiles', formatteropts)
1296 def perfctxfiles(ui, repo, x, **opts):
1308 def perfctxfiles(ui, repo, x, **opts):
1297 opts = _byteskwargs(opts)
1309 opts = _byteskwargs(opts)
1298 x = int(x)
1310 x = int(x)
1299 timer, fm = gettimer(ui, opts)
1311 timer, fm = gettimer(ui, opts)
1300 def d():
1312 def d():
1301 len(repo[x].files())
1313 len(repo[x].files())
1302 timer(d)
1314 timer(d)
1303 fm.end()
1315 fm.end()
1304
1316
1305 @command(b'perfrawfiles', formatteropts)
1317 @command(b'perfrawfiles', formatteropts)
1306 def perfrawfiles(ui, repo, x, **opts):
1318 def perfrawfiles(ui, repo, x, **opts):
1307 opts = _byteskwargs(opts)
1319 opts = _byteskwargs(opts)
1308 x = int(x)
1320 x = int(x)
1309 timer, fm = gettimer(ui, opts)
1321 timer, fm = gettimer(ui, opts)
1310 cl = repo.changelog
1322 cl = repo.changelog
1311 def d():
1323 def d():
1312 len(cl.read(x)[3])
1324 len(cl.read(x)[3])
1313 timer(d)
1325 timer(d)
1314 fm.end()
1326 fm.end()
1315
1327
1316 @command(b'perflookup', formatteropts)
1328 @command(b'perflookup', formatteropts)
1317 def perflookup(ui, repo, rev, **opts):
1329 def perflookup(ui, repo, rev, **opts):
1318 opts = _byteskwargs(opts)
1330 opts = _byteskwargs(opts)
1319 timer, fm = gettimer(ui, opts)
1331 timer, fm = gettimer(ui, opts)
1320 timer(lambda: len(repo.lookup(rev)))
1332 timer(lambda: len(repo.lookup(rev)))
1321 fm.end()
1333 fm.end()
1322
1334
1323 @command(b'perflinelogedits',
1335 @command(b'perflinelogedits',
1324 [(b'n', b'edits', 10000, b'number of edits'),
1336 [(b'n', b'edits', 10000, b'number of edits'),
1325 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1337 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1326 ], norepo=True)
1338 ], norepo=True)
1327 def perflinelogedits(ui, **opts):
1339 def perflinelogedits(ui, **opts):
1328 from mercurial import linelog
1340 from mercurial import linelog
1329
1341
1330 opts = _byteskwargs(opts)
1342 opts = _byteskwargs(opts)
1331
1343
1332 edits = opts[b'edits']
1344 edits = opts[b'edits']
1333 maxhunklines = opts[b'max_hunk_lines']
1345 maxhunklines = opts[b'max_hunk_lines']
1334
1346
1335 maxb1 = 100000
1347 maxb1 = 100000
1336 random.seed(0)
1348 random.seed(0)
1337 randint = random.randint
1349 randint = random.randint
1338 currentlines = 0
1350 currentlines = 0
1339 arglist = []
1351 arglist = []
1340 for rev in _xrange(edits):
1352 for rev in _xrange(edits):
1341 a1 = randint(0, currentlines)
1353 a1 = randint(0, currentlines)
1342 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1354 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1343 b1 = randint(0, maxb1)
1355 b1 = randint(0, maxb1)
1344 b2 = randint(b1, b1 + maxhunklines)
1356 b2 = randint(b1, b1 + maxhunklines)
1345 currentlines += (b2 - b1) - (a2 - a1)
1357 currentlines += (b2 - b1) - (a2 - a1)
1346 arglist.append((rev, a1, a2, b1, b2))
1358 arglist.append((rev, a1, a2, b1, b2))
1347
1359
1348 def d():
1360 def d():
1349 ll = linelog.linelog()
1361 ll = linelog.linelog()
1350 for args in arglist:
1362 for args in arglist:
1351 ll.replacelines(*args)
1363 ll.replacelines(*args)
1352
1364
1353 timer, fm = gettimer(ui, opts)
1365 timer, fm = gettimer(ui, opts)
1354 timer(d)
1366 timer(d)
1355 fm.end()
1367 fm.end()
1356
1368
1357 @command(b'perfrevrange', formatteropts)
1369 @command(b'perfrevrange', formatteropts)
1358 def perfrevrange(ui, repo, *specs, **opts):
1370 def perfrevrange(ui, repo, *specs, **opts):
1359 opts = _byteskwargs(opts)
1371 opts = _byteskwargs(opts)
1360 timer, fm = gettimer(ui, opts)
1372 timer, fm = gettimer(ui, opts)
1361 revrange = scmutil.revrange
1373 revrange = scmutil.revrange
1362 timer(lambda: len(revrange(repo, specs)))
1374 timer(lambda: len(revrange(repo, specs)))
1363 fm.end()
1375 fm.end()
1364
1376
1365 @command(b'perfnodelookup', formatteropts)
1377 @command(b'perfnodelookup', formatteropts)
1366 def perfnodelookup(ui, repo, rev, **opts):
1378 def perfnodelookup(ui, repo, rev, **opts):
1367 opts = _byteskwargs(opts)
1379 opts = _byteskwargs(opts)
1368 timer, fm = gettimer(ui, opts)
1380 timer, fm = gettimer(ui, opts)
1369 import mercurial.revlog
1381 import mercurial.revlog
1370 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1382 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1371 n = scmutil.revsingle(repo, rev).node()
1383 n = scmutil.revsingle(repo, rev).node()
1372 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1384 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1373 def d():
1385 def d():
1374 cl.rev(n)
1386 cl.rev(n)
1375 clearcaches(cl)
1387 clearcaches(cl)
1376 timer(d)
1388 timer(d)
1377 fm.end()
1389 fm.end()
1378
1390
1379 @command(b'perflog',
1391 @command(b'perflog',
1380 [(b'', b'rename', False, b'ask log to follow renames')
1392 [(b'', b'rename', False, b'ask log to follow renames')
1381 ] + formatteropts)
1393 ] + formatteropts)
1382 def perflog(ui, repo, rev=None, **opts):
1394 def perflog(ui, repo, rev=None, **opts):
1383 opts = _byteskwargs(opts)
1395 opts = _byteskwargs(opts)
1384 if rev is None:
1396 if rev is None:
1385 rev=[]
1397 rev=[]
1386 timer, fm = gettimer(ui, opts)
1398 timer, fm = gettimer(ui, opts)
1387 ui.pushbuffer()
1399 ui.pushbuffer()
1388 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1400 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1389 copies=opts.get(b'rename')))
1401 copies=opts.get(b'rename')))
1390 ui.popbuffer()
1402 ui.popbuffer()
1391 fm.end()
1403 fm.end()
1392
1404
1393 @command(b'perfmoonwalk', formatteropts)
1405 @command(b'perfmoonwalk', formatteropts)
1394 def perfmoonwalk(ui, repo, **opts):
1406 def perfmoonwalk(ui, repo, **opts):
1395 """benchmark walking the changelog backwards
1407 """benchmark walking the changelog backwards
1396
1408
1397 This also loads the changelog data for each revision in the changelog.
1409 This also loads the changelog data for each revision in the changelog.
1398 """
1410 """
1399 opts = _byteskwargs(opts)
1411 opts = _byteskwargs(opts)
1400 timer, fm = gettimer(ui, opts)
1412 timer, fm = gettimer(ui, opts)
1401 def moonwalk():
1413 def moonwalk():
1402 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1414 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1403 ctx = repo[i]
1415 ctx = repo[i]
1404 ctx.branch() # read changelog data (in addition to the index)
1416 ctx.branch() # read changelog data (in addition to the index)
1405 timer(moonwalk)
1417 timer(moonwalk)
1406 fm.end()
1418 fm.end()
1407
1419
1408 @command(b'perftemplating',
1420 @command(b'perftemplating',
1409 [(b'r', b'rev', [], b'revisions to run the template on'),
1421 [(b'r', b'rev', [], b'revisions to run the template on'),
1410 ] + formatteropts)
1422 ] + formatteropts)
1411 def perftemplating(ui, repo, testedtemplate=None, **opts):
1423 def perftemplating(ui, repo, testedtemplate=None, **opts):
1412 """test the rendering time of a given template"""
1424 """test the rendering time of a given template"""
1413 if makelogtemplater is None:
1425 if makelogtemplater is None:
1414 raise error.Abort((b"perftemplating not available with this Mercurial"),
1426 raise error.Abort((b"perftemplating not available with this Mercurial"),
1415 hint=b"use 4.3 or later")
1427 hint=b"use 4.3 or later")
1416
1428
1417 opts = _byteskwargs(opts)
1429 opts = _byteskwargs(opts)
1418
1430
1419 nullui = ui.copy()
1431 nullui = ui.copy()
1420 nullui.fout = open(os.devnull, r'wb')
1432 nullui.fout = open(os.devnull, r'wb')
1421 nullui.disablepager()
1433 nullui.disablepager()
1422 revs = opts.get(b'rev')
1434 revs = opts.get(b'rev')
1423 if not revs:
1435 if not revs:
1424 revs = [b'all()']
1436 revs = [b'all()']
1425 revs = list(scmutil.revrange(repo, revs))
1437 revs = list(scmutil.revrange(repo, revs))
1426
1438
1427 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1439 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1428 b' {author|person}: {desc|firstline}\n')
1440 b' {author|person}: {desc|firstline}\n')
1429 if testedtemplate is None:
1441 if testedtemplate is None:
1430 testedtemplate = defaulttemplate
1442 testedtemplate = defaulttemplate
1431 displayer = makelogtemplater(nullui, repo, testedtemplate)
1443 displayer = makelogtemplater(nullui, repo, testedtemplate)
1432 def format():
1444 def format():
1433 for r in revs:
1445 for r in revs:
1434 ctx = repo[r]
1446 ctx = repo[r]
1435 displayer.show(ctx)
1447 displayer.show(ctx)
1436 displayer.flush(ctx)
1448 displayer.flush(ctx)
1437
1449
1438 timer, fm = gettimer(ui, opts)
1450 timer, fm = gettimer(ui, opts)
1439 timer(format)
1451 timer(format)
1440 fm.end()
1452 fm.end()
1441
1453
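The template is passed positionally and revisions with `-r`, so a run presumably looks like ``hg perftemplating -r 'last(all(), 1000)' '{rev}:{node|short}\n'``. Rendering a single revision with the same machinery is roughly:

# Sketch: render one changeset through a log templater, as format() does above.
displayer = makelogtemplater(ui, repo, b'{rev}:{node|short}\n')
ctx = repo[b'tip']
displayer.show(ctx)
displayer.flush(ctx)
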
1442 @command(b'perfhelper-pathcopies', formatteropts +
1454 @command(b'perfhelper-pathcopies', formatteropts +
1443 [
1455 [
1444 (b'r', b'revs', [], b'restrict search to these revisions'),
1456 (b'r', b'revs', [], b'restrict search to these revisions'),
1445 (b'', b'timing', False, b'provides extra data (costly)'),
1457 (b'', b'timing', False, b'provides extra data (costly)'),
1446 ])
1458 ])
1447 def perfhelperpathcopies(ui, repo, revs=[], **opts):
1459 def perfhelperpathcopies(ui, repo, revs=[], **opts):
1448 """find statistic about potential parameters for the `perftracecopies`
1460 """find statistic about potential parameters for the `perftracecopies`
1449
1461
1450 This command finds source-destination pairs relevant for copy tracing testing.
1462 This command finds source-destination pairs relevant for copy tracing testing.
1451 It reports values for some of the parameters that impact copy tracing time.
1463 It reports values for some of the parameters that impact copy tracing time.
1452
1464
1453 If `--timing` is set, rename detection is run and the associated timing
1465 If `--timing` is set, rename detection is run and the associated timing
1454 will be reported. The extra details come at the cost of slower command
1466 will be reported. The extra details come at the cost of slower command
1455 execution.
1467 execution.
1456
1468
1457 Since the rename detection is only run once, other factors might easily
1469 Since the rename detection is only run once, other factors might easily
1458 affect the precision of the timing. However, it should give a good
1470 affect the precision of the timing. However, it should give a good
1459 approximation of which revision pairs are very costly.
1471 approximation of which revision pairs are very costly.
1460 """
1472 """
1461 opts = _byteskwargs(opts)
1473 opts = _byteskwargs(opts)
1462 fm = ui.formatter(b'perf', opts)
1474 fm = ui.formatter(b'perf', opts)
1463 dotiming = opts[b'timing']
1475 dotiming = opts[b'timing']
1464
1476
1465 if dotiming:
1477 if dotiming:
1466 header = '%12s %12s %12s %12s %12s %12s\n'
1478 header = '%12s %12s %12s %12s %12s %12s\n'
1467 output = ("%(source)12s %(destination)12s "
1479 output = ("%(source)12s %(destination)12s "
1468 "%(nbrevs)12d %(nbmissingfiles)12d "
1480 "%(nbrevs)12d %(nbmissingfiles)12d "
1469 "%(nbrenamedfiles)12d %(time)18.5f\n")
1481 "%(nbrenamedfiles)12d %(time)18.5f\n")
1470 header_names = ("source", "destination", "nb-revs", "nb-files",
1482 header_names = ("source", "destination", "nb-revs", "nb-files",
1471 "nb-renames", "time")
1483 "nb-renames", "time")
1472 fm.plain(header % header_names)
1484 fm.plain(header % header_names)
1473 else:
1485 else:
1474 header = '%12s %12s %12s %12s\n'
1486 header = '%12s %12s %12s %12s\n'
1475 output = ("%(source)12s %(destination)12s "
1487 output = ("%(source)12s %(destination)12s "
1476 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1488 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1477 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1489 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1478
1490
1479 if not revs:
1491 if not revs:
1480 revs = ['all()']
1492 revs = ['all()']
1481 revs = scmutil.revrange(repo, revs)
1493 revs = scmutil.revrange(repo, revs)
1482
1494
1483 roi = repo.revs('merge() and %ld', revs)
1495 roi = repo.revs('merge() and %ld', revs)
1484 for r in roi:
1496 for r in roi:
1485 ctx = repo[r]
1497 ctx = repo[r]
1486 p1 = ctx.p1().rev()
1498 p1 = ctx.p1().rev()
1487 p2 = ctx.p2().rev()
1499 p2 = ctx.p2().rev()
1488 bases = repo.changelog._commonancestorsheads(p1, p2)
1500 bases = repo.changelog._commonancestorsheads(p1, p2)
1489 for p in (p1, p2):
1501 for p in (p1, p2):
1490 for b in bases:
1502 for b in bases:
1491 base = repo[b]
1503 base = repo[b]
1492 parent = repo[p]
1504 parent = repo[p]
1493 missing = copies._computeforwardmissing(base, parent)
1505 missing = copies._computeforwardmissing(base, parent)
1494 if not missing:
1506 if not missing:
1495 continue
1507 continue
1496 data = {
1508 data = {
1497 b'source': base.hex(),
1509 b'source': base.hex(),
1498 b'destination': parent.hex(),
1510 b'destination': parent.hex(),
1499 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1511 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1500 b'nbmissingfiles': len(missing),
1512 b'nbmissingfiles': len(missing),
1501 }
1513 }
1502 if dotiming:
1514 if dotiming:
1503 begin = util.timer()
1515 begin = util.timer()
1504 renames = copies.pathcopies(base, parent)
1516 renames = copies.pathcopies(base, parent)
1505 end = util.timer()
1517 end = util.timer()
1506 # not very stable timing since we did only one run
1518 # not very stable timing since we did only one run
1507 data['time'] = end - begin
1519 data['time'] = end - begin
1508 data['nbrenamedfiles'] = len(renames)
1520 data['nbrenamedfiles'] = len(renames)
1509 fm.startitem()
1521 fm.startitem()
1510 fm.data(**data)
1522 fm.data(**data)
1511 out = data.copy()
1523 out = data.copy()
1512 out['source'] = fm.hexfunc(base.node())
1524 out['source'] = fm.hexfunc(base.node())
1513 out['destination'] = fm.hexfunc(parent.node())
1525 out['destination'] = fm.hexfunc(parent.node())
1514 fm.plain(output % out)
1526 fm.plain(output % out)
1515
1527
1516 fm.end()
1528 fm.end()
1517
1529
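A typical run is presumably something like ``hg perfhelper-pathcopies --revs 'last(all(), 1000)' --timing``, which restricts the search and adds the rename-count and time columns. The revisions examined are simply the merges inside the search set:

# Sketch: select the merge revisions the helper iterates over.
from mercurial import scmutil

revs = scmutil.revrange(repo, ['all()'])
roi = repo.revs('merge() and %ld', revs)   # merges of interest, as above
for r in roi:
    ctx = repo[r]
    p1, p2 = ctx.p1().rev(), ctx.p2().rev()
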
1518 @command(b'perfcca', formatteropts)
1530 @command(b'perfcca', formatteropts)
1519 def perfcca(ui, repo, **opts):
1531 def perfcca(ui, repo, **opts):
1520 opts = _byteskwargs(opts)
1532 opts = _byteskwargs(opts)
1521 timer, fm = gettimer(ui, opts)
1533 timer, fm = gettimer(ui, opts)
1522 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1534 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1523 fm.end()
1535 fm.end()
1524
1536
1525 @command(b'perffncacheload', formatteropts)
1537 @command(b'perffncacheload', formatteropts)
1526 def perffncacheload(ui, repo, **opts):
1538 def perffncacheload(ui, repo, **opts):
1527 opts = _byteskwargs(opts)
1539 opts = _byteskwargs(opts)
1528 timer, fm = gettimer(ui, opts)
1540 timer, fm = gettimer(ui, opts)
1529 s = repo.store
1541 s = repo.store
1530 def d():
1542 def d():
1531 s.fncache._load()
1543 s.fncache._load()
1532 timer(d)
1544 timer(d)
1533 fm.end()
1545 fm.end()
1534
1546
1535 @command(b'perffncachewrite', formatteropts)
1547 @command(b'perffncachewrite', formatteropts)
1536 def perffncachewrite(ui, repo, **opts):
1548 def perffncachewrite(ui, repo, **opts):
1537 opts = _byteskwargs(opts)
1549 opts = _byteskwargs(opts)
1538 timer, fm = gettimer(ui, opts)
1550 timer, fm = gettimer(ui, opts)
1539 s = repo.store
1551 s = repo.store
1540 lock = repo.lock()
1552 lock = repo.lock()
1541 s.fncache._load()
1553 s.fncache._load()
1542 tr = repo.transaction(b'perffncachewrite')
1554 tr = repo.transaction(b'perffncachewrite')
1543 tr.addbackup(b'fncache')
1555 tr.addbackup(b'fncache')
1544 def d():
1556 def d():
1545 s.fncache._dirty = True
1557 s.fncache._dirty = True
1546 s.fncache.write(tr)
1558 s.fncache.write(tr)
1547 timer(d)
1559 timer(d)
1548 tr.close()
1560 tr.close()
1549 lock.release()
1561 lock.release()
1550 fm.end()
1562 fm.end()
1551
1563
1552 @command(b'perffncacheencode', formatteropts)
1564 @command(b'perffncacheencode', formatteropts)
1553 def perffncacheencode(ui, repo, **opts):
1565 def perffncacheencode(ui, repo, **opts):
1554 opts = _byteskwargs(opts)
1566 opts = _byteskwargs(opts)
1555 timer, fm = gettimer(ui, opts)
1567 timer, fm = gettimer(ui, opts)
1556 s = repo.store
1568 s = repo.store
1557 s.fncache._load()
1569 s.fncache._load()
1558 def d():
1570 def d():
1559 for p in s.fncache.entries:
1571 for p in s.fncache.entries:
1560 s.encode(p)
1572 s.encode(p)
1561 timer(d)
1573 timer(d)
1562 fm.end()
1574 fm.end()
1563
1575
1564 def _bdiffworker(q, blocks, xdiff, ready, done):
1576 def _bdiffworker(q, blocks, xdiff, ready, done):
1565 while not done.is_set():
1577 while not done.is_set():
1566 pair = q.get()
1578 pair = q.get()
1567 while pair is not None:
1579 while pair is not None:
1568 if xdiff:
1580 if xdiff:
1569 mdiff.bdiff.xdiffblocks(*pair)
1581 mdiff.bdiff.xdiffblocks(*pair)
1570 elif blocks:
1582 elif blocks:
1571 mdiff.bdiff.blocks(*pair)
1583 mdiff.bdiff.blocks(*pair)
1572 else:
1584 else:
1573 mdiff.textdiff(*pair)
1585 mdiff.textdiff(*pair)
1574 q.task_done()
1586 q.task_done()
1575 pair = q.get()
1587 pair = q.get()
1576 q.task_done() # for the None one
1588 q.task_done() # for the None one
1577 with ready:
1589 with ready:
1578 ready.wait()
1590 ready.wait()
1579
1591
1580 def _manifestrevision(repo, mnode):
1592 def _manifestrevision(repo, mnode):
1581 ml = repo.manifestlog
1593 ml = repo.manifestlog
1582
1594
1583 if util.safehasattr(ml, b'getstorage'):
1595 if util.safehasattr(ml, b'getstorage'):
1584 store = ml.getstorage(b'')
1596 store = ml.getstorage(b'')
1585 else:
1597 else:
1586 store = ml._revlog
1598 store = ml._revlog
1587
1599
1588 return store.revision(mnode)
1600 return store.revision(mnode)
1589
1601
1590 @command(b'perfbdiff', revlogopts + formatteropts + [
1602 @command(b'perfbdiff', revlogopts + formatteropts + [
1591 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1603 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1592 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1604 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1593 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
1605 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
1594 (b'', b'blocks', False, b'test computing diffs into blocks'),
1606 (b'', b'blocks', False, b'test computing diffs into blocks'),
1595 (b'', b'xdiff', False, b'use xdiff algorithm'),
1607 (b'', b'xdiff', False, b'use xdiff algorithm'),
1596 ],
1608 ],
1597
1609
1598 b'-c|-m|FILE REV')
1610 b'-c|-m|FILE REV')
1599 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1611 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1600 """benchmark a bdiff between revisions
1612 """benchmark a bdiff between revisions
1601
1613
1602 By default, benchmark a bdiff between its delta parent and itself.
1614 By default, benchmark a bdiff between its delta parent and itself.
1603
1615
1604 With ``--count``, benchmark bdiffs between delta parents and self for N
1616 With ``--count``, benchmark bdiffs between delta parents and self for N
1605 revisions starting at the specified revision.
1617 revisions starting at the specified revision.
1606
1618
1607 With ``--alldata``, assume the requested revision is a changeset and
1619 With ``--alldata``, assume the requested revision is a changeset and
1608 measure bdiffs for all changes related to that changeset (manifest
1620 measure bdiffs for all changes related to that changeset (manifest
1609 and filelogs).
1621 and filelogs).
1610 """
1622 """
1611 opts = _byteskwargs(opts)
1623 opts = _byteskwargs(opts)
1612
1624
1613 if opts[b'xdiff'] and not opts[b'blocks']:
1625 if opts[b'xdiff'] and not opts[b'blocks']:
1614 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1626 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1615
1627
1616 if opts[b'alldata']:
1628 if opts[b'alldata']:
1617 opts[b'changelog'] = True
1629 opts[b'changelog'] = True
1618
1630
1619 if opts.get(b'changelog') or opts.get(b'manifest'):
1631 if opts.get(b'changelog') or opts.get(b'manifest'):
1620 file_, rev = None, file_
1632 file_, rev = None, file_
1621 elif rev is None:
1633 elif rev is None:
1622 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1634 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1623
1635
1624 blocks = opts[b'blocks']
1636 blocks = opts[b'blocks']
1625 xdiff = opts[b'xdiff']
1637 xdiff = opts[b'xdiff']
1626 textpairs = []
1638 textpairs = []
1627
1639
1628 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1640 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1629
1641
1630 startrev = r.rev(r.lookup(rev))
1642 startrev = r.rev(r.lookup(rev))
1631 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1643 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1632 if opts[b'alldata']:
1644 if opts[b'alldata']:
1633 # Load revisions associated with changeset.
1645 # Load revisions associated with changeset.
1634 ctx = repo[rev]
1646 ctx = repo[rev]
1635 mtext = _manifestrevision(repo, ctx.manifestnode())
1647 mtext = _manifestrevision(repo, ctx.manifestnode())
1636 for pctx in ctx.parents():
1648 for pctx in ctx.parents():
1637 pman = _manifestrevision(repo, pctx.manifestnode())
1649 pman = _manifestrevision(repo, pctx.manifestnode())
1638 textpairs.append((pman, mtext))
1650 textpairs.append((pman, mtext))
1639
1651
1640 # Load filelog revisions by iterating manifest delta.
1652 # Load filelog revisions by iterating manifest delta.
1641 man = ctx.manifest()
1653 man = ctx.manifest()
1642 pman = ctx.p1().manifest()
1654 pman = ctx.p1().manifest()
1643 for filename, change in pman.diff(man).items():
1655 for filename, change in pman.diff(man).items():
1644 fctx = repo.file(filename)
1656 fctx = repo.file(filename)
1645 f1 = fctx.revision(change[0][0] or -1)
1657 f1 = fctx.revision(change[0][0] or -1)
1646 f2 = fctx.revision(change[1][0] or -1)
1658 f2 = fctx.revision(change[1][0] or -1)
1647 textpairs.append((f1, f2))
1659 textpairs.append((f1, f2))
1648 else:
1660 else:
1649 dp = r.deltaparent(rev)
1661 dp = r.deltaparent(rev)
1650 textpairs.append((r.revision(dp), r.revision(rev)))
1662 textpairs.append((r.revision(dp), r.revision(rev)))
1651
1663
1652 withthreads = threads > 0
1664 withthreads = threads > 0
1653 if not withthreads:
1665 if not withthreads:
1654 def d():
1666 def d():
1655 for pair in textpairs:
1667 for pair in textpairs:
1656 if xdiff:
1668 if xdiff:
1657 mdiff.bdiff.xdiffblocks(*pair)
1669 mdiff.bdiff.xdiffblocks(*pair)
1658 elif blocks:
1670 elif blocks:
1659 mdiff.bdiff.blocks(*pair)
1671 mdiff.bdiff.blocks(*pair)
1660 else:
1672 else:
1661 mdiff.textdiff(*pair)
1673 mdiff.textdiff(*pair)
1662 else:
1674 else:
1663 q = queue()
1675 q = queue()
1664 for i in _xrange(threads):
1676 for i in _xrange(threads):
1665 q.put(None)
1677 q.put(None)
1666 ready = threading.Condition()
1678 ready = threading.Condition()
1667 done = threading.Event()
1679 done = threading.Event()
1668 for i in _xrange(threads):
1680 for i in _xrange(threads):
1669 threading.Thread(target=_bdiffworker,
1681 threading.Thread(target=_bdiffworker,
1670 args=(q, blocks, xdiff, ready, done)).start()
1682 args=(q, blocks, xdiff, ready, done)).start()
1671 q.join()
1683 q.join()
1672 def d():
1684 def d():
1673 for pair in textpairs:
1685 for pair in textpairs:
1674 q.put(pair)
1686 q.put(pair)
1675 for i in _xrange(threads):
1687 for i in _xrange(threads):
1676 q.put(None)
1688 q.put(None)
1677 with ready:
1689 with ready:
1678 ready.notify_all()
1690 ready.notify_all()
1679 q.join()
1691 q.join()
1680 timer, fm = gettimer(ui, opts)
1692 timer, fm = gettimer(ui, opts)
1681 timer(d)
1693 timer(d)
1682 fm.end()
1694 fm.end()
1683
1695
1684 if withthreads:
1696 if withthreads:
1685 done.set()
1697 done.set()
1686 for i in _xrange(threads):
1698 for i in _xrange(threads):
1687 q.put(None)
1699 q.put(None)
1688 with ready:
1700 with ready:
1689 ready.notify_all()
1701 ready.notify_all()
1690
1702
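Per the ``-c|-m|FILE REV`` synopsis, invocations presumably look like ``hg perfbdiff -c 1000``, ``hg perfbdiff -m 1000 --count 50`` or ``hg perfbdiff --alldata 1000``. A single-pair version of the timed work is:

# Sketch: one bdiff between a changelog revision and its delta parent.
cl = repo.changelog
rev = len(cl) - 1                 # placeholder: the tip revision
dp = cl.deltaparent(rev)
mdiff.textdiff(cl.revision(dp), cl.revision(rev))
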
1691 @command(b'perfunidiff', revlogopts + formatteropts + [
1703 @command(b'perfunidiff', revlogopts + formatteropts + [
1692 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1704 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1693 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1705 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1694 ], b'-c|-m|FILE REV')
1706 ], b'-c|-m|FILE REV')
1695 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1707 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1696 """benchmark a unified diff between revisions
1708 """benchmark a unified diff between revisions
1697
1709
1698 This doesn't include any copy tracing - it's just a unified diff
1710 This doesn't include any copy tracing - it's just a unified diff
1699 of the texts.
1711 of the texts.
1700
1712
1701 By default, benchmark a diff between its delta parent and itself.
1713 By default, benchmark a diff between its delta parent and itself.
1702
1714
1703 With ``--count``, benchmark diffs between delta parents and self for N
1715 With ``--count``, benchmark diffs between delta parents and self for N
1704 revisions starting at the specified revision.
1716 revisions starting at the specified revision.
1705
1717
1706 With ``--alldata``, assume the requested revision is a changeset and
1718 With ``--alldata``, assume the requested revision is a changeset and
1707 measure diffs for all changes related to that changeset (manifest
1719 measure diffs for all changes related to that changeset (manifest
1708 and filelogs).
1720 and filelogs).
1709 """
1721 """
1710 opts = _byteskwargs(opts)
1722 opts = _byteskwargs(opts)
1711 if opts[b'alldata']:
1723 if opts[b'alldata']:
1712 opts[b'changelog'] = True
1724 opts[b'changelog'] = True
1713
1725
1714 if opts.get(b'changelog') or opts.get(b'manifest'):
1726 if opts.get(b'changelog') or opts.get(b'manifest'):
1715 file_, rev = None, file_
1727 file_, rev = None, file_
1716 elif rev is None:
1728 elif rev is None:
1717 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1729 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1718
1730
1719 textpairs = []
1731 textpairs = []
1720
1732
1721 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1733 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1722
1734
1723 startrev = r.rev(r.lookup(rev))
1735 startrev = r.rev(r.lookup(rev))
1724 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1736 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1725 if opts[b'alldata']:
1737 if opts[b'alldata']:
1726 # Load revisions associated with changeset.
1738 # Load revisions associated with changeset.
1727 ctx = repo[rev]
1739 ctx = repo[rev]
1728 mtext = _manifestrevision(repo, ctx.manifestnode())
1740 mtext = _manifestrevision(repo, ctx.manifestnode())
1729 for pctx in ctx.parents():
1741 for pctx in ctx.parents():
1730 pman = _manifestrevision(repo, pctx.manifestnode())
1742 pman = _manifestrevision(repo, pctx.manifestnode())
1731 textpairs.append((pman, mtext))
1743 textpairs.append((pman, mtext))
1732
1744
1733 # Load filelog revisions by iterating manifest delta.
1745 # Load filelog revisions by iterating manifest delta.
1734 man = ctx.manifest()
1746 man = ctx.manifest()
1735 pman = ctx.p1().manifest()
1747 pman = ctx.p1().manifest()
1736 for filename, change in pman.diff(man).items():
1748 for filename, change in pman.diff(man).items():
1737 fctx = repo.file(filename)
1749 fctx = repo.file(filename)
1738 f1 = fctx.revision(change[0][0] or -1)
1750 f1 = fctx.revision(change[0][0] or -1)
1739 f2 = fctx.revision(change[1][0] or -1)
1751 f2 = fctx.revision(change[1][0] or -1)
1740 textpairs.append((f1, f2))
1752 textpairs.append((f1, f2))
1741 else:
1753 else:
1742 dp = r.deltaparent(rev)
1754 dp = r.deltaparent(rev)
1743 textpairs.append((r.revision(dp), r.revision(rev)))
1755 textpairs.append((r.revision(dp), r.revision(rev)))
1744
1756
1745 def d():
1757 def d():
1746 for left, right in textpairs:
1758 for left, right in textpairs:
1747 # The date strings don't matter, so we pass empty strings.
1759 # The date strings don't matter, so we pass empty strings.
1748 headerlines, hunks = mdiff.unidiff(
1760 headerlines, hunks = mdiff.unidiff(
1749 left, b'', right, b'', b'left', b'right', binary=False)
1761 left, b'', right, b'', b'left', b'right', binary=False)
1750 # consume iterators in roughly the way patch.py does
1762 # consume iterators in roughly the way patch.py does
1751 b'\n'.join(headerlines)
1763 b'\n'.join(headerlines)
1752 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1764 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1753 timer, fm = gettimer(ui, opts)
1765 timer, fm = gettimer(ui, opts)
1754 timer(d)
1766 timer(d)
1755 fm.end()
1767 fm.end()
1756
1768
1757 @command(b'perfdiffwd', formatteropts)
1769 @command(b'perfdiffwd', formatteropts)
1758 def perfdiffwd(ui, repo, **opts):
1770 def perfdiffwd(ui, repo, **opts):
1759 """Profile diff of working directory changes"""
1771 """Profile diff of working directory changes"""
1760 opts = _byteskwargs(opts)
1772 opts = _byteskwargs(opts)
1761 timer, fm = gettimer(ui, opts)
1773 timer, fm = gettimer(ui, opts)
1762 options = {
1774 options = {
1763 'w': 'ignore_all_space',
1775 'w': 'ignore_all_space',
1764 'b': 'ignore_space_change',
1776 'b': 'ignore_space_change',
1765 'B': 'ignore_blank_lines',
1777 'B': 'ignore_blank_lines',
1766 }
1778 }
1767
1779
1768 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1780 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1769 opts = dict((options[c], b'1') for c in diffopt)
1781 opts = dict((options[c], b'1') for c in diffopt)
1770 def d():
1782 def d():
1771 ui.pushbuffer()
1783 ui.pushbuffer()
1772 commands.diff(ui, repo, **opts)
1784 commands.diff(ui, repo, **opts)
1773 ui.popbuffer()
1785 ui.popbuffer()
1774 diffopt = diffopt.encode('ascii')
1786 diffopt = diffopt.encode('ascii')
1775 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1787 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1776 timer(d, title=title)
1788 timer(d, title=title)
1777 fm.end()
1789 fm.end()
1778
1790
1779 @command(b'perfrevlogindex', revlogopts + formatteropts,
1791 @command(b'perfrevlogindex', revlogopts + formatteropts,
1780 b'-c|-m|FILE')
1792 b'-c|-m|FILE')
1781 def perfrevlogindex(ui, repo, file_=None, **opts):
1793 def perfrevlogindex(ui, repo, file_=None, **opts):
1782 """Benchmark operations against a revlog index.
1794 """Benchmark operations against a revlog index.
1783
1795
1784 This tests constructing a revlog instance, reading index data,
1796 This tests constructing a revlog instance, reading index data,
1785 parsing index data, and performing various operations related to
1797 parsing index data, and performing various operations related to
1786 index data.
1798 index data.
1787 """
1799 """
1788
1800
1789 opts = _byteskwargs(opts)
1801 opts = _byteskwargs(opts)
1790
1802
1791 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1803 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1792
1804
1793 opener = getattr(rl, 'opener') # trick linter
1805 opener = getattr(rl, 'opener') # trick linter
1794 indexfile = rl.indexfile
1806 indexfile = rl.indexfile
1795 data = opener.read(indexfile)
1807 data = opener.read(indexfile)
1796
1808
1797 header = struct.unpack(b'>I', data[0:4])[0]
1809 header = struct.unpack(b'>I', data[0:4])[0]
1798 version = header & 0xFFFF
1810 version = header & 0xFFFF
1799 if version == 1:
1811 if version == 1:
1800 revlogio = revlog.revlogio()
1812 revlogio = revlog.revlogio()
1801 inline = header & (1 << 16)
1813 inline = header & (1 << 16)
1802 else:
1814 else:
1803 raise error.Abort((b'unsupported revlog version: %d') % version)
1815 raise error.Abort((b'unsupported revlog version: %d') % version)
1804
1816
1805 rllen = len(rl)
1817 rllen = len(rl)
1806
1818
1807 node0 = rl.node(0)
1819 node0 = rl.node(0)
1808 node25 = rl.node(rllen // 4)
1820 node25 = rl.node(rllen // 4)
1809 node50 = rl.node(rllen // 2)
1821 node50 = rl.node(rllen // 2)
1810 node75 = rl.node(rllen // 4 * 3)
1822 node75 = rl.node(rllen // 4 * 3)
1811 node100 = rl.node(rllen - 1)
1823 node100 = rl.node(rllen - 1)
1812
1824
1813 allrevs = range(rllen)
1825 allrevs = range(rllen)
1814 allrevsrev = list(reversed(allrevs))
1826 allrevsrev = list(reversed(allrevs))
1815 allnodes = [rl.node(rev) for rev in range(rllen)]
1827 allnodes = [rl.node(rev) for rev in range(rllen)]
1816 allnodesrev = list(reversed(allnodes))
1828 allnodesrev = list(reversed(allnodes))
1817
1829
1818 def constructor():
1830 def constructor():
1819 revlog.revlog(opener, indexfile)
1831 revlog.revlog(opener, indexfile)
1820
1832
1821 def read():
1833 def read():
1822 with opener(indexfile) as fh:
1834 with opener(indexfile) as fh:
1823 fh.read()
1835 fh.read()
1824
1836
1825 def parseindex():
1837 def parseindex():
1826 revlogio.parseindex(data, inline)
1838 revlogio.parseindex(data, inline)
1827
1839
1828 def getentry(revornode):
1840 def getentry(revornode):
1829 index = revlogio.parseindex(data, inline)[0]
1841 index = revlogio.parseindex(data, inline)[0]
1830 index[revornode]
1842 index[revornode]
1831
1843
1832 def getentries(revs, count=1):
1844 def getentries(revs, count=1):
1833 index = revlogio.parseindex(data, inline)[0]
1845 index = revlogio.parseindex(data, inline)[0]
1834
1846
1835 for i in range(count):
1847 for i in range(count):
1836 for rev in revs:
1848 for rev in revs:
1837 index[rev]
1849 index[rev]
1838
1850
1839 def resolvenode(node):
1851 def resolvenode(node):
1840 nodemap = revlogio.parseindex(data, inline)[1]
1852 nodemap = revlogio.parseindex(data, inline)[1]
1841 # This only works for the C code.
1853 # This only works for the C code.
1842 if nodemap is None:
1854 if nodemap is None:
1843 return
1855 return
1844
1856
1845 try:
1857 try:
1846 nodemap[node]
1858 nodemap[node]
1847 except error.RevlogError:
1859 except error.RevlogError:
1848 pass
1860 pass
1849
1861
1850 def resolvenodes(nodes, count=1):
1862 def resolvenodes(nodes, count=1):
1851 nodemap = revlogio.parseindex(data, inline)[1]
1863 nodemap = revlogio.parseindex(data, inline)[1]
1852 if nodemap is None:
1864 if nodemap is None:
1853 return
1865 return
1854
1866
1855 for i in range(count):
1867 for i in range(count):
1856 for node in nodes:
1868 for node in nodes:
1857 try:
1869 try:
1858 nodemap[node]
1870 nodemap[node]
1859 except error.RevlogError:
1871 except error.RevlogError:
1860 pass
1872 pass
1861
1873
1862 benches = [
1874 benches = [
1863 (constructor, b'revlog constructor'),
1875 (constructor, b'revlog constructor'),
1864 (read, b'read'),
1876 (read, b'read'),
1865 (parseindex, b'create index object'),
1877 (parseindex, b'create index object'),
1866 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1878 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1867 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1879 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1868 (lambda: resolvenode(node0), b'look up node at rev 0'),
1880 (lambda: resolvenode(node0), b'look up node at rev 0'),
1869 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1881 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1870 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1882 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1871 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1883 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1872 (lambda: resolvenode(node100), b'look up node at tip'),
1884 (lambda: resolvenode(node100), b'look up node at tip'),
1873 # 2x variation is to measure caching impact.
1885 # 2x variation is to measure caching impact.
1874 (lambda: resolvenodes(allnodes),
1886 (lambda: resolvenodes(allnodes),
1875 b'look up all nodes (forward)'),
1887 b'look up all nodes (forward)'),
1876 (lambda: resolvenodes(allnodes, 2),
1888 (lambda: resolvenodes(allnodes, 2),
1877 b'look up all nodes 2x (forward)'),
1889 b'look up all nodes 2x (forward)'),
1878 (lambda: resolvenodes(allnodesrev),
1890 (lambda: resolvenodes(allnodesrev),
1879 b'look up all nodes (reverse)'),
1891 b'look up all nodes (reverse)'),
1880 (lambda: resolvenodes(allnodesrev, 2),
1892 (lambda: resolvenodes(allnodesrev, 2),
1881 b'look up all nodes 2x (reverse)'),
1893 b'look up all nodes 2x (reverse)'),
1882 (lambda: getentries(allrevs),
1894 (lambda: getentries(allrevs),
1883 b'retrieve all index entries (forward)'),
1895 b'retrieve all index entries (forward)'),
1884 (lambda: getentries(allrevs, 2),
1896 (lambda: getentries(allrevs, 2),
1885 b'retrieve all index entries 2x (forward)'),
1897 b'retrieve all index entries 2x (forward)'),
1886 (lambda: getentries(allrevsrev),
1898 (lambda: getentries(allrevsrev),
1887 b'retrieve all index entries (reverse)'),
1899 b'retrieve all index entries (reverse)'),
1888 (lambda: getentries(allrevsrev, 2),
1900 (lambda: getentries(allrevsrev, 2),
1889 b'retrieve all index entries 2x (reverse)'),
1901 b'retrieve all index entries 2x (reverse)'),
1890 ]
1902 ]
1891
1903
1892 for fn, title in benches:
1904 for fn, title in benches:
1893 timer, fm = gettimer(ui, opts)
1905 timer, fm = gettimer(ui, opts)
1894 timer(fn, title=title)
1906 timer(fn, title=title)
1895 fm.end()
1907 fm.end()
1896
1908
1897 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1909 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1898 [(b'd', b'dist', 100, b'distance between the revisions'),
1910 [(b'd', b'dist', 100, b'distance between the revisions'),
1899 (b's', b'startrev', 0, b'revision to start reading at'),
1911 (b's', b'startrev', 0, b'revision to start reading at'),
1900 (b'', b'reverse', False, b'read in reverse')],
1912 (b'', b'reverse', False, b'read in reverse')],
1901 b'-c|-m|FILE')
1913 b'-c|-m|FILE')
1902 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1914 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1903 **opts):
1915 **opts):
1904 """Benchmark reading a series of revisions from a revlog.
1916 """Benchmark reading a series of revisions from a revlog.
1905
1917
1906 By default, we read every ``-d/--dist`` revision from 0 to tip of
1918 By default, we read every ``-d/--dist`` revision from 0 to tip of
1907 the specified revlog.
1919 the specified revlog.
1908
1920
1909 The start revision can be defined via ``-s/--startrev``.
1921 The start revision can be defined via ``-s/--startrev``.
1910 """
1922 """
1911 opts = _byteskwargs(opts)
1923 opts = _byteskwargs(opts)
1912
1924
1913 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1925 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1914 rllen = getlen(ui)(rl)
1926 rllen = getlen(ui)(rl)
1915
1927
1916 if startrev < 0:
1928 if startrev < 0:
1917 startrev = rllen + startrev
1929 startrev = rllen + startrev
1918
1930
1919 def d():
1931 def d():
1920 rl.clearcaches()
1932 rl.clearcaches()
1921
1933
1922 beginrev = startrev
1934 beginrev = startrev
1923 endrev = rllen
1935 endrev = rllen
1924 dist = opts[b'dist']
1936 dist = opts[b'dist']
1925
1937
1926 if reverse:
1938 if reverse:
1927 beginrev, endrev = endrev - 1, beginrev - 1
1939 beginrev, endrev = endrev - 1, beginrev - 1
1928 dist = -1 * dist
1940 dist = -1 * dist
1929
1941
1930 for x in _xrange(beginrev, endrev, dist):
1942 for x in _xrange(beginrev, endrev, dist):
1931 # Old Mercurial versions don't support passing an int here.
1943 # Old Mercurial versions don't support passing an int here.
1932 n = rl.node(x)
1944 n = rl.node(x)
1933 rl.revision(n)
1945 rl.revision(n)
1934
1946
1935 timer, fm = gettimer(ui, opts)
1947 timer, fm = gettimer(ui, opts)
1936 timer(d)
1948 timer(d)
1937 fm.end()
1949 fm.end()
1938
1950
1939 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1951 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1940 [(b's', b'startrev', 1000, b'revision to start writing at'),
1952 [(b's', b'startrev', 1000, b'revision to start writing at'),
1941 (b'', b'stoprev', -1, b'last revision to write'),
1953 (b'', b'stoprev', -1, b'last revision to write'),
1942 (b'', b'count', 3, b'number of passes to perform'),
1954 (b'', b'count', 3, b'number of passes to perform'),
1943 (b'', b'details', False, b'print timing for every revision tested'),
1955 (b'', b'details', False, b'print timing for every revision tested'),
1944 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
1956 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
1945 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1957 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1946 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1958 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1947 ],
1959 ],
1948 b'-c|-m|FILE')
1960 b'-c|-m|FILE')
1949 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1961 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1950 """Benchmark writing a series of revisions to a revlog.
1962 """Benchmark writing a series of revisions to a revlog.
1951
1963
1952 Possible source values are:
1964 Possible source values are:
1953 * `full`: add from a full text (default).
1965 * `full`: add from a full text (default).
1954 * `parent-1`: add from a delta to the first parent
1966 * `parent-1`: add from a delta to the first parent
1955 * `parent-2`: add from a delta to the second parent if it exists
1967 * `parent-2`: add from a delta to the second parent if it exists
1956 (use a delta from the first parent otherwise)
1968 (use a delta from the first parent otherwise)
1957 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1969 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1958 * `storage`: add from the existing precomputed deltas
1970 * `storage`: add from the existing precomputed deltas
1959 """
1971 """
1960 opts = _byteskwargs(opts)
1972 opts = _byteskwargs(opts)
1961
1973
1962 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1974 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1963 rllen = getlen(ui)(rl)
1975 rllen = getlen(ui)(rl)
1964 if startrev < 0:
1976 if startrev < 0:
1965 startrev = rllen + startrev
1977 startrev = rllen + startrev
1966 if stoprev < 0:
1978 if stoprev < 0:
1967 stoprev = rllen + stoprev
1979 stoprev = rllen + stoprev
1968
1980
1969 lazydeltabase = opts['lazydeltabase']
1981 lazydeltabase = opts['lazydeltabase']
1970 source = opts['source']
1982 source = opts['source']
1971 clearcaches = opts['clear_caches']
1983 clearcaches = opts['clear_caches']
1972 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1984 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1973 b'storage')
1985 b'storage')
1974 if source not in validsource:
1986 if source not in validsource:
1975 raise error.Abort('invalid source type: %s' % source)
1987 raise error.Abort('invalid source type: %s' % source)
1976
1988
1977 ### actually gather results
1989 ### actually gather results
1978 count = opts['count']
1990 count = opts['count']
1979 if count <= 0:
1991 if count <= 0:
1980 raise error.Abort('invalid run count: %d' % count)
1992 raise error.Abort('invalid run count: %d' % count)
1981 allresults = []
1993 allresults = []
1982 for c in range(count):
1994 for c in range(count):
1983 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1995 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1984 lazydeltabase=lazydeltabase,
1996 lazydeltabase=lazydeltabase,
1985 clearcaches=clearcaches)
1997 clearcaches=clearcaches)
1986 allresults.append(timing)
1998 allresults.append(timing)
1987
1999
1988 ### consolidate the results in a single list
2000 ### consolidate the results in a single list
1989 results = []
2001 results = []
1990 for idx, (rev, t) in enumerate(allresults[0]):
2002 for idx, (rev, t) in enumerate(allresults[0]):
1991 ts = [t]
2003 ts = [t]
1992 for other in allresults[1:]:
2004 for other in allresults[1:]:
1993 orev, ot = other[idx]
2005 orev, ot = other[idx]
1994 assert orev == rev
2006 assert orev == rev
1995 ts.append(ot)
2007 ts.append(ot)
1996 results.append((rev, ts))
2008 results.append((rev, ts))
1997 resultcount = len(results)
2009 resultcount = len(results)
1998
2010
1999 ### Compute and display relevant statistics
2011 ### Compute and display relevant statistics
2000
2012
2001 # get a formatter
2013 # get a formatter
2002 fm = ui.formatter(b'perf', opts)
2014 fm = ui.formatter(b'perf', opts)
2003 displayall = ui.configbool(b"perf", b"all-timing", False)
2015 displayall = ui.configbool(b"perf", b"all-timing", False)
2004
2016
2005 # print individual details if requested
2017 # print individual details if requested
2006 if opts['details']:
2018 if opts['details']:
2007 for idx, item in enumerate(results, 1):
2019 for idx, item in enumerate(results, 1):
2008 rev, data = item
2020 rev, data = item
2009 title = 'revision #%d of %d, rev %d' % (idx, resultcount, rev)
2021 title = 'revision #%d of %d, rev %d' % (idx, resultcount, rev)
2010 formatone(fm, data, title=title, displayall=displayall)
2022 formatone(fm, data, title=title, displayall=displayall)
2011
2023
2012 # sorts results by median time
2024 # sorts results by median time
2013 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
2025 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
2014 # list of (name, index) to display
2026 # list of (name, index) to display
2015 relevants = [
2027 relevants = [
2016 ("min", 0),
2028 ("min", 0),
2017 ("10%", resultcount * 10 // 100),
2029 ("10%", resultcount * 10 // 100),
2018 ("25%", resultcount * 25 // 100),
2030 ("25%", resultcount * 25 // 100),
2019 ("50%", resultcount * 70 // 100),
2031 ("50%", resultcount * 70 // 100),
2020 ("75%", resultcount * 75 // 100),
2032 ("75%", resultcount * 75 // 100),
2021 ("90%", resultcount * 90 // 100),
2033 ("90%", resultcount * 90 // 100),
2022 ("95%", resultcount * 95 // 100),
2034 ("95%", resultcount * 95 // 100),
2023 ("99%", resultcount * 99 // 100),
2035 ("99%", resultcount * 99 // 100),
2024 ("99.9%", resultcount * 999 // 1000),
2036 ("99.9%", resultcount * 999 // 1000),
2025 ("99.99%", resultcount * 9999 // 10000),
2037 ("99.99%", resultcount * 9999 // 10000),
2026 ("99.999%", resultcount * 99999 // 100000),
2038 ("99.999%", resultcount * 99999 // 100000),
2027 ("max", -1),
2039 ("max", -1),
2028 ]
2040 ]
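# A minimal sketch of the index arithmetic above: with the results sorted by
# median time, integer division maps each percentile label to a 0-based
# position and -1 selects the slowest entry (the values below are illustrative).
#
#   resultcount = 200
#   [(name, resultcount * pct // 100) for name, pct in
#    (("50%", 50), ("90%", 90), ("99%", 99))]
#   # -> [('50%', 100), ('90%', 180), ('99%', 198)]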
2029 if not ui.quiet:
2041 if not ui.quiet:
2030 for name, idx in relevants:
2042 for name, idx in relevants:
2031 data = results[idx]
2043 data = results[idx]
2032 title = '%s of %d, rev %d' % (name, resultcount, data[0])
2044 title = '%s of %d, rev %d' % (name, resultcount, data[0])
2033 formatone(fm, data[1], title=title, displayall=displayall)
2045 formatone(fm, data[1], title=title, displayall=displayall)
2034
2046
2035 # XXX summing that many floats will not be very precise; we ignore this fact
2047 # XXX summing that many floats will not be very precise; we ignore this fact
2036 # for now
2048 # for now
2037 totaltime = []
2049 totaltime = []
2038 for item in allresults:
2050 for item in allresults:
2039 totaltime.append((sum(x[1][0] for x in item),
2051 totaltime.append((sum(x[1][0] for x in item),
2040 sum(x[1][1] for x in item),
2052 sum(x[1][1] for x in item),
2041 sum(x[1][2] for x in item),)
2053 sum(x[1][2] for x in item),)
2042 )
2054 )
2043 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
2055 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
2044 displayall=displayall)
2056 displayall=displayall)
2045 fm.end()
2057 fm.end()
2046
2058
2047 class _faketr(object):
2059 class _faketr(object):
2048 def add(s, x, y, z=None):
2060 def add(s, x, y, z=None):
2049 return None
2061 return None
2050
2062
2051 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
2063 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
2052 lazydeltabase=True, clearcaches=True):
2064 lazydeltabase=True, clearcaches=True):
2053 timings = []
2065 timings = []
2054 tr = _faketr()
2066 tr = _faketr()
2055 with _temprevlog(ui, orig, startrev) as dest:
2067 with _temprevlog(ui, orig, startrev) as dest:
2056 dest._lazydeltabase = lazydeltabase
2068 dest._lazydeltabase = lazydeltabase
2057 revs = list(orig.revs(startrev, stoprev))
2069 revs = list(orig.revs(startrev, stoprev))
2058 total = len(revs)
2070 total = len(revs)
2059 topic = 'adding'
2071 topic = 'adding'
2060 if runidx is not None:
2072 if runidx is not None:
2061 topic += ' (run #%d)' % runidx
2073 topic += ' (run #%d)' % runidx
2062 # Support both old and new progress API
2074 # Support both old and new progress API
2063 if util.safehasattr(ui, 'makeprogress'):
2075 if util.safehasattr(ui, 'makeprogress'):
2064 progress = ui.makeprogress(topic, unit='revs', total=total)
2076 progress = ui.makeprogress(topic, unit='revs', total=total)
2065 def updateprogress(pos):
2077 def updateprogress(pos):
2066 progress.update(pos)
2078 progress.update(pos)
2067 def completeprogress():
2079 def completeprogress():
2068 progress.complete()
2080 progress.complete()
2069 else:
2081 else:
2070 def updateprogress(pos):
2082 def updateprogress(pos):
2071 ui.progress(topic, pos, unit='revs', total=total)
2083 ui.progress(topic, pos, unit='revs', total=total)
2072 def completeprogress():
2084 def completeprogress():
2073 ui.progress(topic, None, unit='revs', total=total)
2085 ui.progress(topic, None, unit='revs', total=total)
2074
2086
2075 for idx, rev in enumerate(revs):
2087 for idx, rev in enumerate(revs):
2076 updateprogress(idx)
2088 updateprogress(idx)
2077 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
2089 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
2078 if clearcaches:
2090 if clearcaches:
2079 dest.index.clearcaches()
2091 dest.index.clearcaches()
2080 dest.clearcaches()
2092 dest.clearcaches()
2081 with timeone() as r:
2093 with timeone() as r:
2082 dest.addrawrevision(*addargs, **addkwargs)
2094 dest.addrawrevision(*addargs, **addkwargs)
2083 timings.append((rev, r[0]))
2095 timings.append((rev, r[0]))
2084 updateprogress(total)
2096 updateprogress(total)
2085 completeprogress()
2097 completeprogress()
2086 return timings
2098 return timings
2087
2099
2088 def _getrevisionseed(orig, rev, tr, source):
2100 def _getrevisionseed(orig, rev, tr, source):
2089 from mercurial.node import nullid
2101 from mercurial.node import nullid
2090
2102
2091 linkrev = orig.linkrev(rev)
2103 linkrev = orig.linkrev(rev)
2092 node = orig.node(rev)
2104 node = orig.node(rev)
2093 p1, p2 = orig.parents(node)
2105 p1, p2 = orig.parents(node)
2094 flags = orig.flags(rev)
2106 flags = orig.flags(rev)
2095 cachedelta = None
2107 cachedelta = None
2096 text = None
2108 text = None
2097
2109
2098 if source == b'full':
2110 if source == b'full':
2099 text = orig.revision(rev)
2111 text = orig.revision(rev)
2100 elif source == b'parent-1':
2112 elif source == b'parent-1':
2101 baserev = orig.rev(p1)
2113 baserev = orig.rev(p1)
2102 cachedelta = (baserev, orig.revdiff(p1, rev))
2114 cachedelta = (baserev, orig.revdiff(p1, rev))
2103 elif source == b'parent-2':
2115 elif source == b'parent-2':
2104 parent = p2
2116 parent = p2
2105 if p2 == nullid:
2117 if p2 == nullid:
2106 parent = p1
2118 parent = p1
2107 baserev = orig.rev(parent)
2119 baserev = orig.rev(parent)
2108 cachedelta = (baserev, orig.revdiff(parent, rev))
2120 cachedelta = (baserev, orig.revdiff(parent, rev))
2109 elif source == b'parent-smallest':
2121 elif source == b'parent-smallest':
2110 p1diff = orig.revdiff(p1, rev)
2122 p1diff = orig.revdiff(p1, rev)
2111 parent = p1
2123 parent = p1
2112 diff = p1diff
2124 diff = p1diff
2113 if p2 != nullid:
2125 if p2 != nullid:
2114 p2diff = orig.revdiff(p2, rev)
2126 p2diff = orig.revdiff(p2, rev)
2115 if len(p1diff) > len(p2diff):
2127 if len(p1diff) > len(p2diff):
2116 parent = p2
2128 parent = p2
2117 diff = p2diff
2129 diff = p2diff
2118 baserev = orig.rev(parent)
2130 baserev = orig.rev(parent)
2119 cachedelta = (baserev, diff)
2131 cachedelta = (baserev, diff)
2120 elif source == b'storage':
2132 elif source == b'storage':
2121 baserev = orig.deltaparent(rev)
2133 baserev = orig.deltaparent(rev)
2122 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
2134 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
2123
2135
2124 return ((text, tr, linkrev, p1, p2),
2136 return ((text, tr, linkrev, p1, p2),
2125 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2137 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2126
2138
2127 @contextlib.contextmanager
2139 @contextlib.contextmanager
2128 def _temprevlog(ui, orig, truncaterev):
2140 def _temprevlog(ui, orig, truncaterev):
2129 from mercurial import vfs as vfsmod
2141 from mercurial import vfs as vfsmod
2130
2142
2131 if orig._inline:
2143 if orig._inline:
2132 raise error.Abort('not supporting inline revlog (yet)')
2144 raise error.Abort('not supporting inline revlog (yet)')
2133
2145
2134 origindexpath = orig.opener.join(orig.indexfile)
2146 origindexpath = orig.opener.join(orig.indexfile)
2135 origdatapath = orig.opener.join(orig.datafile)
2147 origdatapath = orig.opener.join(orig.datafile)
2136 indexname = 'revlog.i'
2148 indexname = 'revlog.i'
2137 dataname = 'revlog.d'
2149 dataname = 'revlog.d'
2138
2150
2139 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2151 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2140 try:
2152 try:
2141 # copy the data file in a temporary directory
2153 # copy the data file in a temporary directory
2142 ui.debug('copying data in %s\n' % tmpdir)
2154 ui.debug('copying data in %s\n' % tmpdir)
2143 destindexpath = os.path.join(tmpdir, 'revlog.i')
2155 destindexpath = os.path.join(tmpdir, 'revlog.i')
2144 destdatapath = os.path.join(tmpdir, 'revlog.d')
2156 destdatapath = os.path.join(tmpdir, 'revlog.d')
2145 shutil.copyfile(origindexpath, destindexpath)
2157 shutil.copyfile(origindexpath, destindexpath)
2146 shutil.copyfile(origdatapath, destdatapath)
2158 shutil.copyfile(origdatapath, destdatapath)
2147
2159
2148 # remove the data we want to add again
2160 # remove the data we want to add again
2149 ui.debug('truncating data to be rewritten\n')
2161 ui.debug('truncating data to be rewritten\n')
2150 with open(destindexpath, 'ab') as index:
2162 with open(destindexpath, 'ab') as index:
2151 index.seek(0)
2163 index.seek(0)
2152 index.truncate(truncaterev * orig._io.size)
2164 index.truncate(truncaterev * orig._io.size)
2153 with open(destdatapath, 'ab') as data:
2165 with open(destdatapath, 'ab') as data:
2154 data.seek(0)
2166 data.seek(0)
2155 data.truncate(orig.start(truncaterev))
2167 data.truncate(orig.start(truncaterev))
2156
2168
2157 # instantiate a new revlog from the temporary copy
2169 # instantiate a new revlog from the temporary copy
2158 ui.debug('instantiating a new revlog from the truncated copy\n')
2170 ui.debug('instantiating a new revlog from the truncated copy\n')
2159 vfs = vfsmod.vfs(tmpdir)
2171 vfs = vfsmod.vfs(tmpdir)
2160 vfs.options = getattr(orig.opener, 'options', None)
2172 vfs.options = getattr(orig.opener, 'options', None)
2161
2173
2162 dest = revlog.revlog(vfs,
2174 dest = revlog.revlog(vfs,
2163 indexfile=indexname,
2175 indexfile=indexname,
2164 datafile=dataname)
2176 datafile=dataname)
2165 if dest._inline:
2177 if dest._inline:
2166 raise error.Abort('not supporting inline revlog (yet)')
2178 raise error.Abort('not supporting inline revlog (yet)')
2167 # make sure internals are initialized
2179 # make sure internals are initialized
2168 dest.revision(len(dest) - 1)
2180 dest.revision(len(dest) - 1)
2169 yield dest
2181 yield dest
2170 del dest, vfs
2182 del dest, vfs
2171 finally:
2183 finally:
2172 shutil.rmtree(tmpdir, True)
2184 shutil.rmtree(tmpdir, True)
2173
2185
2174 @command(b'perfrevlogchunks', revlogopts + formatteropts +
2186 @command(b'perfrevlogchunks', revlogopts + formatteropts +
2175 [(b'e', b'engines', b'', b'compression engines to use'),
2187 [(b'e', b'engines', b'', b'compression engines to use'),
2176 (b's', b'startrev', 0, b'revision to start at')],
2188 (b's', b'startrev', 0, b'revision to start at')],
2177 b'-c|-m|FILE')
2189 b'-c|-m|FILE')
2178 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2190 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2179 """Benchmark operations on revlog chunks.
2191 """Benchmark operations on revlog chunks.
2180
2192
2181 Logically, each revlog is a collection of fulltext revisions. However,
2193 Logically, each revlog is a collection of fulltext revisions. However,
2182 stored within each revlog are "chunks" of possibly compressed data. This
2194 stored within each revlog are "chunks" of possibly compressed data. This
2183 data needs to be read and decompressed or compressed and written.
2195 data needs to be read and decompressed or compressed and written.
2184
2196
2185 This command measures the time it takes to read+decompress and recompress
2197 This command measures the time it takes to read+decompress and recompress
2186 chunks in a revlog. It effectively isolates I/O and compression performance.
2198 chunks in a revlog. It effectively isolates I/O and compression performance.
2187 For measurements of higher-level operations like resolving revisions,
2199 For measurements of higher-level operations like resolving revisions,
2188 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2200 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2189 """
2201 """
2190 opts = _byteskwargs(opts)
2202 opts = _byteskwargs(opts)
2191
2203
2192 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2204 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2193
2205
2194 # _chunkraw was renamed to _getsegmentforrevs.
2206 # _chunkraw was renamed to _getsegmentforrevs.
2195 try:
2207 try:
2196 segmentforrevs = rl._getsegmentforrevs
2208 segmentforrevs = rl._getsegmentforrevs
2197 except AttributeError:
2209 except AttributeError:
2198 segmentforrevs = rl._chunkraw
2210 segmentforrevs = rl._chunkraw
2199
2211
2200 # Verify engines argument.
2212 # Verify engines argument.
2201 if engines:
2213 if engines:
2202 engines = set(e.strip() for e in engines.split(b','))
2214 engines = set(e.strip() for e in engines.split(b','))
2203 for engine in engines:
2215 for engine in engines:
2204 try:
2216 try:
2205 util.compressionengines[engine]
2217 util.compressionengines[engine]
2206 except KeyError:
2218 except KeyError:
2207 raise error.Abort(b'unknown compression engine: %s' % engine)
2219 raise error.Abort(b'unknown compression engine: %s' % engine)
2208 else:
2220 else:
2209 engines = []
2221 engines = []
2210 for e in util.compengines:
2222 for e in util.compengines:
2211 engine = util.compengines[e]
2223 engine = util.compengines[e]
2212 try:
2224 try:
2213 if engine.available():
2225 if engine.available():
2214 engine.revlogcompressor().compress(b'dummy')
2226 engine.revlogcompressor().compress(b'dummy')
2215 engines.append(e)
2227 engines.append(e)
2216 except NotImplementedError:
2228 except NotImplementedError:
2217 pass
2229 pass
2218
2230
2219 revs = list(rl.revs(startrev, len(rl) - 1))
2231 revs = list(rl.revs(startrev, len(rl) - 1))
2220
2232
2221 def rlfh(rl):
2233 def rlfh(rl):
2222 if rl._inline:
2234 if rl._inline:
2223 return getsvfs(repo)(rl.indexfile)
2235 return getsvfs(repo)(rl.indexfile)
2224 else:
2236 else:
2225 return getsvfs(repo)(rl.datafile)
2237 return getsvfs(repo)(rl.datafile)
2226
2238
2227 def doread():
2239 def doread():
2228 rl.clearcaches()
2240 rl.clearcaches()
2229 for rev in revs:
2241 for rev in revs:
2230 segmentforrevs(rev, rev)
2242 segmentforrevs(rev, rev)
2231
2243
2232 def doreadcachedfh():
2244 def doreadcachedfh():
2233 rl.clearcaches()
2245 rl.clearcaches()
2234 fh = rlfh(rl)
2246 fh = rlfh(rl)
2235 for rev in revs:
2247 for rev in revs:
2236 segmentforrevs(rev, rev, df=fh)
2248 segmentforrevs(rev, rev, df=fh)
2237
2249
2238 def doreadbatch():
2250 def doreadbatch():
2239 rl.clearcaches()
2251 rl.clearcaches()
2240 segmentforrevs(revs[0], revs[-1])
2252 segmentforrevs(revs[0], revs[-1])
2241
2253
2242 def doreadbatchcachedfh():
2254 def doreadbatchcachedfh():
2243 rl.clearcaches()
2255 rl.clearcaches()
2244 fh = rlfh(rl)
2256 fh = rlfh(rl)
2245 segmentforrevs(revs[0], revs[-1], df=fh)
2257 segmentforrevs(revs[0], revs[-1], df=fh)
2246
2258
2247 def dochunk():
2259 def dochunk():
2248 rl.clearcaches()
2260 rl.clearcaches()
2249 fh = rlfh(rl)
2261 fh = rlfh(rl)
2250 for rev in revs:
2262 for rev in revs:
2251 rl._chunk(rev, df=fh)
2263 rl._chunk(rev, df=fh)
2252
2264
2253 chunks = [None]
2265 chunks = [None]
2254
2266
2255 def dochunkbatch():
2267 def dochunkbatch():
2256 rl.clearcaches()
2268 rl.clearcaches()
2257 fh = rlfh(rl)
2269 fh = rlfh(rl)
2258 # Save chunks as a side-effect.
2270 # Save chunks as a side-effect.
2259 chunks[0] = rl._chunks(revs, df=fh)
2271 chunks[0] = rl._chunks(revs, df=fh)
2260
2272
2261 def docompress(compressor):
2273 def docompress(compressor):
2262 rl.clearcaches()
2274 rl.clearcaches()
2263
2275
2264 try:
2276 try:
2265 # Swap in the requested compression engine.
2277 # Swap in the requested compression engine.
2266 oldcompressor = rl._compressor
2278 oldcompressor = rl._compressor
2267 rl._compressor = compressor
2279 rl._compressor = compressor
2268 for chunk in chunks[0]:
2280 for chunk in chunks[0]:
2269 rl.compress(chunk)
2281 rl.compress(chunk)
2270 finally:
2282 finally:
2271 rl._compressor = oldcompressor
2283 rl._compressor = oldcompressor
2272
2284
2273 benches = [
2285 benches = [
2274 (lambda: doread(), b'read'),
2286 (lambda: doread(), b'read'),
2275 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2287 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2276 (lambda: doreadbatch(), b'read batch'),
2288 (lambda: doreadbatch(), b'read batch'),
2277 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2289 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2278 (lambda: dochunk(), b'chunk'),
2290 (lambda: dochunk(), b'chunk'),
2279 (lambda: dochunkbatch(), b'chunk batch'),
2291 (lambda: dochunkbatch(), b'chunk batch'),
2280 ]
2292 ]
2281
2293
2282 for engine in sorted(engines):
2294 for engine in sorted(engines):
2283 compressor = util.compengines[engine].revlogcompressor()
2295 compressor = util.compengines[engine].revlogcompressor()
2284 benches.append((functools.partial(docompress, compressor),
2296 benches.append((functools.partial(docompress, compressor),
2285 b'compress w/ %s' % engine))
2297 b'compress w/ %s' % engine))
2286
2298
2287 for fn, title in benches:
2299 for fn, title in benches:
2288 timer, fm = gettimer(ui, opts)
2300 timer, fm = gettimer(ui, opts)
2289 timer(fn, title=title)
2301 timer(fn, title=title)
2290 fm.end()
2302 fm.end()
2291
2303
2292 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2304 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2293 [(b'', b'cache', False, b'use caches instead of clearing')],
2305 [(b'', b'cache', False, b'use caches instead of clearing')],
2294 b'-c|-m|FILE REV')
2306 b'-c|-m|FILE REV')
2295 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2307 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2296 """Benchmark obtaining a revlog revision.
2308 """Benchmark obtaining a revlog revision.
2297
2309
2298 Obtaining a revlog revision consists of roughly the following steps:
2310 Obtaining a revlog revision consists of roughly the following steps:
2299
2311
2300 1. Compute the delta chain
2312 1. Compute the delta chain
2301 2. Slice the delta chain if applicable
2313 2. Slice the delta chain if applicable
2302 3. Obtain the raw chunks for that delta chain
2314 3. Obtain the raw chunks for that delta chain
2303 4. Decompress each raw chunk
2315 4. Decompress each raw chunk
2304 5. Apply binary patches to obtain fulltext
2316 5. Apply binary patches to obtain fulltext
2305 6. Verify hash of fulltext
2317 6. Verify hash of fulltext
2306
2318
2307 This command measures the time spent in each of these phases.
2319 This command measures the time spent in each of these phases.
2308 """
2320 """
2309 opts = _byteskwargs(opts)
2321 opts = _byteskwargs(opts)
2310
2322
2311 if opts.get(b'changelog') or opts.get(b'manifest'):
2323 if opts.get(b'changelog') or opts.get(b'manifest'):
2312 file_, rev = None, file_
2324 file_, rev = None, file_
2313 elif rev is None:
2325 elif rev is None:
2314 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2326 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2315
2327
2316 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2328 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2317
2329
2318 # _chunkraw was renamed to _getsegmentforrevs.
2330 # _chunkraw was renamed to _getsegmentforrevs.
2319 try:
2331 try:
2320 segmentforrevs = r._getsegmentforrevs
2332 segmentforrevs = r._getsegmentforrevs
2321 except AttributeError:
2333 except AttributeError:
2322 segmentforrevs = r._chunkraw
2334 segmentforrevs = r._chunkraw
2323
2335
2324 node = r.lookup(rev)
2336 node = r.lookup(rev)
2325 rev = r.rev(node)
2337 rev = r.rev(node)
2326
2338
2327 def getrawchunks(data, chain):
2339 def getrawchunks(data, chain):
2328 start = r.start
2340 start = r.start
2329 length = r.length
2341 length = r.length
2330 inline = r._inline
2342 inline = r._inline
2331 iosize = r._io.size
2343 iosize = r._io.size
2332 buffer = util.buffer
2344 buffer = util.buffer
2333
2345
2334 chunks = []
2346 chunks = []
2335 ladd = chunks.append
2347 ladd = chunks.append
2336 for idx, item in enumerate(chain):
2348 for idx, item in enumerate(chain):
2337 offset = start(item[0])
2349 offset = start(item[0])
2338 bits = data[idx]
2350 bits = data[idx]
2339 for rev in item:
2351 for rev in item:
2340 chunkstart = start(rev)
2352 chunkstart = start(rev)
2341 if inline:
2353 if inline:
2342 chunkstart += (rev + 1) * iosize
2354 chunkstart += (rev + 1) * iosize
2343 chunklength = length(rev)
2355 chunklength = length(rev)
2344 ladd(buffer(bits, chunkstart - offset, chunklength))
2356 ladd(buffer(bits, chunkstart - offset, chunklength))
2345
2357
2346 return chunks
2358 return chunks
2347
2359
2348 def dodeltachain(rev):
2360 def dodeltachain(rev):
2349 if not cache:
2361 if not cache:
2350 r.clearcaches()
2362 r.clearcaches()
2351 r._deltachain(rev)
2363 r._deltachain(rev)
2352
2364
2353 def doread(chain):
2365 def doread(chain):
2354 if not cache:
2366 if not cache:
2355 r.clearcaches()
2367 r.clearcaches()
2356 for item in slicedchain:
2368 for item in slicedchain:
2357 segmentforrevs(item[0], item[-1])
2369 segmentforrevs(item[0], item[-1])
2358
2370
2359 def doslice(r, chain, size):
2371 def doslice(r, chain, size):
2360 for s in slicechunk(r, chain, targetsize=size):
2372 for s in slicechunk(r, chain, targetsize=size):
2361 pass
2373 pass
2362
2374
2363 def dorawchunks(data, chain):
2375 def dorawchunks(data, chain):
2364 if not cache:
2376 if not cache:
2365 r.clearcaches()
2377 r.clearcaches()
2366 getrawchunks(data, chain)
2378 getrawchunks(data, chain)
2367
2379
2368 def dodecompress(chunks):
2380 def dodecompress(chunks):
2369 decomp = r.decompress
2381 decomp = r.decompress
2370 for chunk in chunks:
2382 for chunk in chunks:
2371 decomp(chunk)
2383 decomp(chunk)
2372
2384
2373 def dopatch(text, bins):
2385 def dopatch(text, bins):
2374 if not cache:
2386 if not cache:
2375 r.clearcaches()
2387 r.clearcaches()
2376 mdiff.patches(text, bins)
2388 mdiff.patches(text, bins)
2377
2389
2378 def dohash(text):
2390 def dohash(text):
2379 if not cache:
2391 if not cache:
2380 r.clearcaches()
2392 r.clearcaches()
2381 r.checkhash(text, node, rev=rev)
2393 r.checkhash(text, node, rev=rev)
2382
2394
2383 def dorevision():
2395 def dorevision():
2384 if not cache:
2396 if not cache:
2385 r.clearcaches()
2397 r.clearcaches()
2386 r.revision(node)
2398 r.revision(node)
2387
2399
2388 try:
2400 try:
2389 from mercurial.revlogutils.deltas import slicechunk
2401 from mercurial.revlogutils.deltas import slicechunk
2390 except ImportError:
2402 except ImportError:
2391 slicechunk = getattr(revlog, '_slicechunk', None)
2403 slicechunk = getattr(revlog, '_slicechunk', None)
2392
2404
2393 size = r.length(rev)
2405 size = r.length(rev)
2394 chain = r._deltachain(rev)[0]
2406 chain = r._deltachain(rev)[0]
2395 if not getattr(r, '_withsparseread', False):
2407 if not getattr(r, '_withsparseread', False):
2396 slicedchain = (chain,)
2408 slicedchain = (chain,)
2397 else:
2409 else:
2398 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2410 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2399 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2411 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2400 rawchunks = getrawchunks(data, slicedchain)
2412 rawchunks = getrawchunks(data, slicedchain)
2401 bins = r._chunks(chain)
2413 bins = r._chunks(chain)
2402 text = bytes(bins[0])
2414 text = bytes(bins[0])
2403 bins = bins[1:]
2415 bins = bins[1:]
2404 text = mdiff.patches(text, bins)
2416 text = mdiff.patches(text, bins)
2405
2417
2406 benches = [
2418 benches = [
2407 (lambda: dorevision(), b'full'),
2419 (lambda: dorevision(), b'full'),
2408 (lambda: dodeltachain(rev), b'deltachain'),
2420 (lambda: dodeltachain(rev), b'deltachain'),
2409 (lambda: doread(chain), b'read'),
2421 (lambda: doread(chain), b'read'),
2410 ]
2422 ]
2411
2423
2412 if getattr(r, '_withsparseread', False):
2424 if getattr(r, '_withsparseread', False):
2413 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2425 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2414 benches.append(slicing)
2426 benches.append(slicing)
2415
2427
2416 benches.extend([
2428 benches.extend([
2417 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2429 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2418 (lambda: dodecompress(rawchunks), b'decompress'),
2430 (lambda: dodecompress(rawchunks), b'decompress'),
2419 (lambda: dopatch(text, bins), b'patch'),
2431 (lambda: dopatch(text, bins), b'patch'),
2420 (lambda: dohash(text), b'hash'),
2432 (lambda: dohash(text), b'hash'),
2421 ])
2433 ])
2422
2434
2423 timer, fm = gettimer(ui, opts)
2435 timer, fm = gettimer(ui, opts)
2424 for fn, title in benches:
2436 for fn, title in benches:
2425 timer(fn, title=title)
2437 timer(fn, title=title)
2426 fm.end()
2438 fm.end()
2427
2439
2428 @command(b'perfrevset',
2440 @command(b'perfrevset',
2429 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2441 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2430 (b'', b'contexts', False, b'obtain changectx for each revision')]
2442 (b'', b'contexts', False, b'obtain changectx for each revision')]
2431 + formatteropts, b"REVSET")
2443 + formatteropts, b"REVSET")
2432 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2444 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2433 """benchmark the execution time of a revset
2445 """benchmark the execution time of a revset
2434
2446
2435 Use the --clear option if you need to evaluate the impact of building the
2447 Use the --clear option if you need to evaluate the impact of building the
2436 volatile revision set caches on the revset execution. The volatile caches
2448 volatile revision set caches on the revset execution. The volatile caches
2437 hold the filtering- and obsolescence-related data."""
2449 hold the filtering- and obsolescence-related data."""
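# Example invocations (a minimal sketch; the revsets are illustrative):
#
#   $ hg perfrevset 'all()'             # plain revset evaluation
#   $ hg perfrevset 'draft()' --clear   # drop the volatile caches between runs
#   $ hg perfrevset '::tip' --contexts  # also build a changectx for each result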
2438 opts = _byteskwargs(opts)
2450 opts = _byteskwargs(opts)
2439
2451
2440 timer, fm = gettimer(ui, opts)
2452 timer, fm = gettimer(ui, opts)
2441 def d():
2453 def d():
2442 if clear:
2454 if clear:
2443 repo.invalidatevolatilesets()
2455 repo.invalidatevolatilesets()
2444 if contexts:
2456 if contexts:
2445 for ctx in repo.set(expr): pass
2457 for ctx in repo.set(expr): pass
2446 else:
2458 else:
2447 for r in repo.revs(expr): pass
2459 for r in repo.revs(expr): pass
2448 timer(d)
2460 timer(d)
2449 fm.end()
2461 fm.end()
2450
2462
2451 @command(b'perfvolatilesets',
2463 @command(b'perfvolatilesets',
2452 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2464 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2453 ] + formatteropts)
2465 ] + formatteropts)
2454 def perfvolatilesets(ui, repo, *names, **opts):
2466 def perfvolatilesets(ui, repo, *names, **opts):
2455 """benchmark the computation of various volatile set
2467 """benchmark the computation of various volatile set
2456
2468
2457 Volatile set computes element related to filtering and obsolescence."""
2469 Volatile set computes element related to filtering and obsolescence."""
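# Example invocations (a minimal sketch; set names come from
# obsolete.cachefuncs and repoview.filtertable, so the ones below are
# illustrative):
#
#   $ hg perfvolatilesets                   # benchmark every known set
#   $ hg perfvolatilesets obsolete visible  # restrict to these two sets
#   $ hg perfvolatilesets --clear-obsstore  # also reload the obsstore each run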
2458 opts = _byteskwargs(opts)
2470 opts = _byteskwargs(opts)
2459 timer, fm = gettimer(ui, opts)
2471 timer, fm = gettimer(ui, opts)
2460 repo = repo.unfiltered()
2472 repo = repo.unfiltered()
2461
2473
2462 def getobs(name):
2474 def getobs(name):
2463 def d():
2475 def d():
2464 repo.invalidatevolatilesets()
2476 repo.invalidatevolatilesets()
2465 if opts[b'clear_obsstore']:
2477 if opts[b'clear_obsstore']:
2466 clearfilecache(repo, b'obsstore')
2478 clearfilecache(repo, b'obsstore')
2467 obsolete.getrevs(repo, name)
2479 obsolete.getrevs(repo, name)
2468 return d
2480 return d
2469
2481
2470 allobs = sorted(obsolete.cachefuncs)
2482 allobs = sorted(obsolete.cachefuncs)
2471 if names:
2483 if names:
2472 allobs = [n for n in allobs if n in names]
2484 allobs = [n for n in allobs if n in names]
2473
2485
2474 for name in allobs:
2486 for name in allobs:
2475 timer(getobs(name), title=name)
2487 timer(getobs(name), title=name)
2476
2488
2477 def getfiltered(name):
2489 def getfiltered(name):
2478 def d():
2490 def d():
2479 repo.invalidatevolatilesets()
2491 repo.invalidatevolatilesets()
2480 if opts[b'clear_obsstore']:
2492 if opts[b'clear_obsstore']:
2481 clearfilecache(repo, b'obsstore')
2493 clearfilecache(repo, b'obsstore')
2482 repoview.filterrevs(repo, name)
2494 repoview.filterrevs(repo, name)
2483 return d
2495 return d
2484
2496
2485 allfilter = sorted(repoview.filtertable)
2497 allfilter = sorted(repoview.filtertable)
2486 if names:
2498 if names:
2487 allfilter = [n for n in allfilter if n in names]
2499 allfilter = [n for n in allfilter if n in names]
2488
2500
2489 for name in allfilter:
2501 for name in allfilter:
2490 timer(getfiltered(name), title=name)
2502 timer(getfiltered(name), title=name)
2491 fm.end()
2503 fm.end()
2492
2504
2493 @command(b'perfbranchmap',
2505 @command(b'perfbranchmap',
2494 [(b'f', b'full', False,
2506 [(b'f', b'full', False,
2495 b'Includes build time of subset'),
2507 b'Includes build time of subset'),
2496 (b'', b'clear-revbranch', False,
2508 (b'', b'clear-revbranch', False,
2497 b'purge the revbranch cache between computation'),
2509 b'purge the revbranch cache between computation'),
2498 ] + formatteropts)
2510 ] + formatteropts)
2499 def perfbranchmap(ui, repo, *filternames, **opts):
2511 def perfbranchmap(ui, repo, *filternames, **opts):
2500 """benchmark the update of a branchmap
2512 """benchmark the update of a branchmap
2501
2513
2502 This benchmarks the full repo.branchmap() call with read and write disabled
2514 This benchmarks the full repo.branchmap() call with read and write disabled
2503 """
2515 """
2504 opts = _byteskwargs(opts)
2516 opts = _byteskwargs(opts)
2505 full = opts.get(b"full", False)
2517 full = opts.get(b"full", False)
2506 clear_revbranch = opts.get(b"clear_revbranch", False)
2518 clear_revbranch = opts.get(b"clear_revbranch", False)
2507 timer, fm = gettimer(ui, opts)
2519 timer, fm = gettimer(ui, opts)
2508 def getbranchmap(filtername):
2520 def getbranchmap(filtername):
2509 """generate a benchmark function for the filtername"""
2521 """generate a benchmark function for the filtername"""
2510 if filtername is None:
2522 if filtername is None:
2511 view = repo
2523 view = repo
2512 else:
2524 else:
2513 view = repo.filtered(filtername)
2525 view = repo.filtered(filtername)
2514 if util.safehasattr(view._branchcaches, '_per_filter'):
2526 if util.safehasattr(view._branchcaches, '_per_filter'):
2515 filtered = view._branchcaches._per_filter
2527 filtered = view._branchcaches._per_filter
2516 else:
2528 else:
2517 # older versions
2529 # older versions
2518 filtered = view._branchcaches
2530 filtered = view._branchcaches
2519 def d():
2531 def d():
2520 if clear_revbranch:
2532 if clear_revbranch:
2521 repo.revbranchcache()._clear()
2533 repo.revbranchcache()._clear()
2522 if full:
2534 if full:
2523 view._branchcaches.clear()
2535 view._branchcaches.clear()
2524 else:
2536 else:
2525 filtered.pop(filtername, None)
2537 filtered.pop(filtername, None)
2526 view.branchmap()
2538 view.branchmap()
2527 return d
2539 return d
2528 # add filter in smaller subset to bigger subset
2540 # add filter in smaller subset to bigger subset
2529 possiblefilters = set(repoview.filtertable)
2541 possiblefilters = set(repoview.filtertable)
2530 if filternames:
2542 if filternames:
2531 possiblefilters &= set(filternames)
2543 possiblefilters &= set(filternames)
2532 subsettable = getbranchmapsubsettable()
2544 subsettable = getbranchmapsubsettable()
2533 allfilters = []
2545 allfilters = []
2534 while possiblefilters:
2546 while possiblefilters:
2535 for name in possiblefilters:
2547 for name in possiblefilters:
2536 subset = subsettable.get(name)
2548 subset = subsettable.get(name)
2537 if subset not in possiblefilters:
2549 if subset not in possiblefilters:
2538 break
2550 break
2539 else:
2551 else:
2540 assert False, b'subset cycle %s!' % possiblefilters
2552 assert False, b'subset cycle %s!' % possiblefilters
2541 allfilters.append(name)
2553 allfilters.append(name)
2542 possiblefilters.remove(name)
2554 possiblefilters.remove(name)
2543
2555
2544 # warm the cache
2556 # warm the cache
2545 if not full:
2557 if not full:
2546 for name in allfilters:
2558 for name in allfilters:
2547 repo.filtered(name).branchmap()
2559 repo.filtered(name).branchmap()
2548 if not filternames or b'unfiltered' in filternames:
2560 if not filternames or b'unfiltered' in filternames:
2549 # add unfiltered
2561 # add unfiltered
2550 allfilters.append(None)
2562 allfilters.append(None)
2551
2563
2552 if util.safehasattr(branchmap.branchcache, 'fromfile'):
2564 if util.safehasattr(branchmap.branchcache, 'fromfile'):
2553 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
2565 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
2554 branchcacheread.set(classmethod(lambda *args: None))
2566 branchcacheread.set(classmethod(lambda *args: None))
2555 else:
2567 else:
2556 # older versions
2568 # older versions
2557 branchcacheread = safeattrsetter(branchmap, b'read')
2569 branchcacheread = safeattrsetter(branchmap, b'read')
2558 branchcacheread.set(lambda *args: None)
2570 branchcacheread.set(lambda *args: None)
2559 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2571 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2560 branchcachewrite.set(lambda *args: None)
2572 branchcachewrite.set(lambda *args: None)
2561 try:
2573 try:
2562 for name in allfilters:
2574 for name in allfilters:
2563 printname = name
2575 printname = name
2564 if name is None:
2576 if name is None:
2565 printname = b'unfiltered'
2577 printname = b'unfiltered'
2566 timer(getbranchmap(name), title=str(printname))
2578 timer(getbranchmap(name), title=str(printname))
2567 finally:
2579 finally:
2568 branchcacheread.restore()
2580 branchcacheread.restore()
2569 branchcachewrite.restore()
2581 branchcachewrite.restore()
2570 fm.end()
2582 fm.end()
2571
2583
2572 @command(b'perfbranchmapupdate', [
2584 @command(b'perfbranchmapupdate', [
2573 (b'', b'base', [], b'subset of revision to start from'),
2585 (b'', b'base', [], b'subset of revision to start from'),
2574 (b'', b'target', [], b'subset of revision to end with'),
2586 (b'', b'target', [], b'subset of revision to end with'),
2575 (b'', b'clear-caches', False, b'clear cache between each run')
2587 (b'', b'clear-caches', False, b'clear cache between each run')
2576 ] + formatteropts)
2588 ] + formatteropts)
2577 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2589 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2578 """benchmark branchmap update from for <base> revs to <target> revs
2590 """benchmark branchmap update from for <base> revs to <target> revs
2579
2591
2580 If `--clear-caches` is passed, the following items will be reset before
2592 If `--clear-caches` is passed, the following items will be reset before
2581 each update:
2593 each update:
2582 * the changelog instance and associated indexes
2594 * the changelog instance and associated indexes
2583 * the rev-branch-cache instance
2595 * the rev-branch-cache instance
2584
2596
2585 Examples:
2597 Examples:
2586
2598
2587 # update for the one last revision
2599 # update for the one last revision
2588 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2600 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2589
2601
2590 # update for a change coming with a new branch
2602 # update for a change coming with a new branch
2591 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2603 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2592 """
2604 """
2593 from mercurial import branchmap
2605 from mercurial import branchmap
2594 from mercurial import repoview
2606 from mercurial import repoview
2595 opts = _byteskwargs(opts)
2607 opts = _byteskwargs(opts)
2596 timer, fm = gettimer(ui, opts)
2608 timer, fm = gettimer(ui, opts)
2597 clearcaches = opts[b'clear_caches']
2609 clearcaches = opts[b'clear_caches']
2598 unfi = repo.unfiltered()
2610 unfi = repo.unfiltered()
2599 x = [None] # used to pass data between closures
2611 x = [None] # used to pass data between closures
2600
2612
2601 # we use a `list` here to avoid possible side effect from smartset
2613 # we use a `list` here to avoid possible side effect from smartset
2602 baserevs = list(scmutil.revrange(repo, base))
2614 baserevs = list(scmutil.revrange(repo, base))
2603 targetrevs = list(scmutil.revrange(repo, target))
2615 targetrevs = list(scmutil.revrange(repo, target))
2604 if not baserevs:
2616 if not baserevs:
2605 raise error.Abort(b'no revisions selected for --base')
2617 raise error.Abort(b'no revisions selected for --base')
2606 if not targetrevs:
2618 if not targetrevs:
2607 raise error.Abort(b'no revisions selected for --target')
2619 raise error.Abort(b'no revisions selected for --target')
2608
2620
2609 # make sure the target branchmap also contains the one in the base
2621 # make sure the target branchmap also contains the one in the base
2610 targetrevs = list(set(baserevs) | set(targetrevs))
2622 targetrevs = list(set(baserevs) | set(targetrevs))
2611 targetrevs.sort()
2623 targetrevs.sort()
2612
2624
2613 cl = repo.changelog
2625 cl = repo.changelog
2614 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2626 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2615 allbaserevs.sort()
2627 allbaserevs.sort()
2616 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2628 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2617
2629
2618 newrevs = list(alltargetrevs.difference(allbaserevs))
2630 newrevs = list(alltargetrevs.difference(allbaserevs))
2619 newrevs.sort()
2631 newrevs.sort()
2620
2632
2621 allrevs = frozenset(unfi.changelog.revs())
2633 allrevs = frozenset(unfi.changelog.revs())
2622 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2634 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2623 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2635 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2624
2636
2625 def basefilter(repo, visibilityexceptions=None):
2637 def basefilter(repo, visibilityexceptions=None):
2626 return basefilterrevs
2638 return basefilterrevs
2627
2639
2628 def targetfilter(repo, visibilityexceptions=None):
2640 def targetfilter(repo, visibilityexceptions=None):
2629 return targetfilterrevs
2641 return targetfilterrevs
2630
2642
2631 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2643 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2632 ui.status(msg % (len(allbaserevs), len(newrevs)))
2644 ui.status(msg % (len(allbaserevs), len(newrevs)))
2633 if targetfilterrevs:
2645 if targetfilterrevs:
2634 msg = b'(%d revisions still filtered)\n'
2646 msg = b'(%d revisions still filtered)\n'
2635 ui.status(msg % len(targetfilterrevs))
2647 ui.status(msg % len(targetfilterrevs))
2636
2648
2637 try:
2649 try:
2638 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2650 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2639 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2651 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2640
2652
2641 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2653 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2642 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2654 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2643
2655
2644 # try to find an existing branchmap to reuse
2656 # try to find an existing branchmap to reuse
2645 subsettable = getbranchmapsubsettable()
2657 subsettable = getbranchmapsubsettable()
2646 candidatefilter = subsettable.get(None)
2658 candidatefilter = subsettable.get(None)
2647 while candidatefilter is not None:
2659 while candidatefilter is not None:
2648 candidatebm = repo.filtered(candidatefilter).branchmap()
2660 candidatebm = repo.filtered(candidatefilter).branchmap()
2649 if candidatebm.validfor(baserepo):
2661 if candidatebm.validfor(baserepo):
2650 filtered = repoview.filterrevs(repo, candidatefilter)
2662 filtered = repoview.filterrevs(repo, candidatefilter)
2651 missing = [r for r in allbaserevs if r in filtered]
2663 missing = [r for r in allbaserevs if r in filtered]
2652 base = candidatebm.copy()
2664 base = candidatebm.copy()
2653 base.update(baserepo, missing)
2665 base.update(baserepo, missing)
2654 break
2666 break
2655 candidatefilter = subsettable.get(candidatefilter)
2667 candidatefilter = subsettable.get(candidatefilter)
2656 else:
2668 else:
2657 # no suitable subset was found
2669 # no suitable subset was found
2658 base = branchmap.branchcache()
2670 base = branchmap.branchcache()
2659 base.update(baserepo, allbaserevs)
2671 base.update(baserepo, allbaserevs)
2660
2672
2661 def setup():
2673 def setup():
2662 x[0] = base.copy()
2674 x[0] = base.copy()
2663 if clearcaches:
2675 if clearcaches:
2664 unfi._revbranchcache = None
2676 unfi._revbranchcache = None
2665 clearchangelog(repo)
2677 clearchangelog(repo)
2666
2678
2667 def bench():
2679 def bench():
2668 x[0].update(targetrepo, newrevs)
2680 x[0].update(targetrepo, newrevs)
2669
2681
2670 timer(bench, setup=setup)
2682 timer(bench, setup=setup)
2671 fm.end()
2683 fm.end()
2672 finally:
2684 finally:
2673 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2685 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2674 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2686 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2675
2687
2676 @command(b'perfbranchmapload', [
2688 @command(b'perfbranchmapload', [
2677 (b'f', b'filter', b'', b'Specify repoview filter'),
2689 (b'f', b'filter', b'', b'Specify repoview filter'),
2678 (b'', b'list', False, b'List branchmap filter caches'),
2690 (b'', b'list', False, b'List branchmap filter caches'),
2679 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2691 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2680
2692
2681 ] + formatteropts)
2693 ] + formatteropts)
2682 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2694 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2683 """benchmark reading the branchmap"""
2695 """benchmark reading the branchmap"""
2684 opts = _byteskwargs(opts)
2696 opts = _byteskwargs(opts)
2685 clearrevlogs = opts[b'clear_revlogs']
2697 clearrevlogs = opts[b'clear_revlogs']
2686
2698
2687 if list:
2699 if list:
2688 for name, kind, st in repo.cachevfs.readdir(stat=True):
2700 for name, kind, st in repo.cachevfs.readdir(stat=True):
2689 if name.startswith(b'branch2'):
2701 if name.startswith(b'branch2'):
2690 filtername = name.partition(b'-')[2] or b'unfiltered'
2702 filtername = name.partition(b'-')[2] or b'unfiltered'
2691 ui.status(b'%s - %s\n'
2703 ui.status(b'%s - %s\n'
2692 % (filtername, util.bytecount(st.st_size)))
2704 % (filtername, util.bytecount(st.st_size)))
2693 return
2705 return
2694 if not filter:
2706 if not filter:
2695 filter = None
2707 filter = None
2696 subsettable = getbranchmapsubsettable()
2708 subsettable = getbranchmapsubsettable()
2697 if filter is None:
2709 if filter is None:
2698 repo = repo.unfiltered()
2710 repo = repo.unfiltered()
2699 else:
2711 else:
2700 repo = repoview.repoview(repo, filter)
2712 repo = repoview.repoview(repo, filter)
2701
2713
2702 repo.branchmap() # make sure we have a relevant, up to date branchmap
2714 repo.branchmap() # make sure we have a relevant, up to date branchmap
2703
2715
2704 try:
2716 try:
2705 fromfile = branchmap.branchcache.fromfile
2717 fromfile = branchmap.branchcache.fromfile
2706 except AttributeError:
2718 except AttributeError:
2707 # older versions
2719 # older versions
2708 fromfile = branchmap.read
2720 fromfile = branchmap.read
2709
2721
2710 currentfilter = filter
2722 currentfilter = filter
2711 # try once without timer, the filter may not be cached
2723 # try once without timer, the filter may not be cached
2712 while fromfile(repo) is None:
2724 while fromfile(repo) is None:
2713 currentfilter = subsettable.get(currentfilter)
2725 currentfilter = subsettable.get(currentfilter)
2714 if currentfilter is None:
2726 if currentfilter is None:
2715 raise error.Abort(b'No branchmap cached for %s repo'
2727 raise error.Abort(b'No branchmap cached for %s repo'
2716 % (filter or b'unfiltered'))
2728 % (filter or b'unfiltered'))
2717 repo = repo.filtered(currentfilter)
2729 repo = repo.filtered(currentfilter)
2718 timer, fm = gettimer(ui, opts)
2730 timer, fm = gettimer(ui, opts)
2719 def setup():
2731 def setup():
2720 if clearrevlogs:
2732 if clearrevlogs:
2721 clearchangelog(repo)
2733 clearchangelog(repo)
2722 def bench():
2734 def bench():
2723 fromfile(repo)
2735 fromfile(repo)
2724 timer(bench, setup=setup)
2736 timer(bench, setup=setup)
2725 fm.end()
2737 fm.end()
2726
2738
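# For orientation: the `--list` branch of perfbranchmapload above reports the
# on-disk branchmap caches, whose file names follow the `branch2[-<filter>]`
# pattern probed by the startswith()/partition() calls. A repository's cache
# directory might, for example, contain `branch2` for the unfiltered view
# alongside `branch2-served` and `branch2-visible` for the corresponding
# repoview filters.
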
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)
    timer(lambda: len(obsolete.obsstore(svfs)))
    fm.end()

@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
     (b'', b'mincost', 0, b'smallest cost of items in cache'),
     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
     (b'', b'size', 4, b'size of cache'),
     (b'', b'gets', 10000, b'number of key lookups'),
     (b'', b'sets', 10000, b'number of key sets'),
     (b'', b'mixed', 10000, b'number of mixed mode operations'),
     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    """benchmark the util.lrucachedict implementation"""
    opts = _byteskwargs(opts)

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

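# A minimal illustration (not used by any benchmark) of the lrucachedict
# operations exercised above. The capacity, keys, values, and costs are
# arbitrary example numbers.
def _lrucachedictexample():
    d = util.lrucachedict(4, maxcost=100)
    d[b'spam'] = 1                 # plain insertion; evicts the LRU entry when full
    d.insert(b'eggs', 2, cost=10)  # insertion whose cost counts against maxcost
    try:
        return d[b'spam']          # lookup; raises KeyError if the key was evicted
    except KeyError:
        return None
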
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def write():
        for i in range(100000):
            ui.write(b'Testing write performance\n')
    timer(write)
    fm.end()

def uisetup(ui):
    if (util.safehasattr(cmdutil, b'openrevlog') and
        not util.safehasattr(commands, b'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, the '--dir' option for
        # openrevlog() should cause a failure, because it has only been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(b"This version doesn't support --dir option",
                                  hint=b"use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)

@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """benchmark the printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        with ui.makeprogress(topic, total=total) as progress:
            for i in pycompat.xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()